Diffstat (limited to 'hw/intc')
-rw-r--r--  hw/intc/Kconfig | 75
-rw-r--r--  hw/intc/allwinner-a10-pic.c | 215
-rw-r--r--  hw/intc/apic.c | 928
-rw-r--r--  hw/intc/apic_common.c | 497
-rw-r--r--  hw/intc/arm_gic.c | 2146
-rw-r--r--  hw/intc/arm_gic_common.c | 394
-rw-r--r--  hw/intc/arm_gic_kvm.c | 619
-rw-r--r--  hw/intc/arm_gicv2m.c | 200
-rw-r--r--  hw/intc/arm_gicv3.c | 420
-rw-r--r--  hw/intc/arm_gicv3_common.c | 561
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c | 2700
-rw-r--r--  hw/intc/arm_gicv3_dist.c | 914
-rw-r--r--  hw/intc/arm_gicv3_its.c | 1323
-rw-r--r--  hw/intc/arm_gicv3_its_common.c | 159
-rw-r--r--  hw/intc/arm_gicv3_its_kvm.c | 266
-rw-r--r--  hw/intc/arm_gicv3_kvm.c | 900
-rw-r--r--  hw/intc/arm_gicv3_redist.c | 738
-rw-r--r--  hw/intc/armv7m_nvic.c | 2735
-rw-r--r--  hw/intc/aspeed_vic.c | 363
-rw-r--r--  hw/intc/bcm2835_ic.c | 243
-rw-r--r--  hw/intc/bcm2836_control.c | 408
-rw-r--r--  hw/intc/etraxfs_pic.c | 172
-rw-r--r--  hw/intc/exynos4210_combiner.c | 461
-rw-r--r--  hw/intc/exynos4210_gic.c | 482
-rw-r--r--  hw/intc/gic_internal.h | 322
-rw-r--r--  hw/intc/gicv3_internal.h | 611
-rw-r--r--  hw/intc/goldfish_pic.c | 219
-rw-r--r--  hw/intc/grlib_irqmp.c | 362
-rw-r--r--  hw/intc/heathrow_pic.c | 210
-rw-r--r--  hw/intc/i8259.c | 466
-rw-r--r--  hw/intc/i8259_common.c | 219
-rw-r--r--  hw/intc/imx_avic.c | 366
-rw-r--r--  hw/intc/imx_gpcv2.c | 126
-rw-r--r--  hw/intc/intc.c | 41
-rw-r--r--  hw/intc/ioapic.c | 513
-rw-r--r--  hw/intc/ioapic_common.c | 224
-rw-r--r--  hw/intc/loongson_liointc.c | 249
-rw-r--r--  hw/intc/m68k_irqc.c | 119
-rw-r--r--  hw/intc/meson.build | 59
-rw-r--r--  hw/intc/mips_gic.c | 468
-rw-r--r--  hw/intc/omap_intc.c | 690
-rw-r--r--  hw/intc/ompic.c | 181
-rw-r--r--  hw/intc/openpic.c | 1645
-rw-r--r--  hw/intc/openpic_kvm.c | 294
-rw-r--r--  hw/intc/pl190.c | 297
-rw-r--r--  hw/intc/pnv_xive.c | 1987
-rw-r--r--  hw/intc/pnv_xive_regs.h | 248
-rw-r--r--  hw/intc/ppc-uic.c | 321
-rw-r--r--  hw/intc/realview_gic.c | 86
-rw-r--r--  hw/intc/riscv_aclint.c | 460
-rw-r--r--  hw/intc/rx_icu.c | 395
-rw-r--r--  hw/intc/s390_flic.c | 503
-rw-r--r--  hw/intc/s390_flic_kvm.c | 679
-rw-r--r--  hw/intc/sh_intc.c | 449
-rw-r--r--  hw/intc/sifive_plic.c | 563
-rw-r--r--  hw/intc/slavio_intctl.c | 475
-rw-r--r--  hw/intc/spapr_xive.c | 1830
-rw-r--r--  hw/intc/spapr_xive_kvm.c | 869
-rw-r--r--  hw/intc/trace-events | 248
-rw-r--r--  hw/intc/trace.h | 1
-rw-r--r--  hw/intc/vgic_common.h | 35
-rw-r--r--  hw/intc/xics.c | 751
-rw-r--r--  hw/intc/xics_kvm.c | 509
-rw-r--r--  hw/intc/xics_pnv.c | 202
-rw-r--r--  hw/intc/xics_spapr.c | 476
-rw-r--r--  hw/intc/xilinx_intc.c | 206
-rw-r--r--  hw/intc/xive.c | 1983
-rw-r--r--  hw/intc/xlnx-pmu-iomod-intc.c | 558
-rw-r--r--  hw/intc/xlnx-zynqmp-ipi.c | 380
69 files changed, 39814 insertions, 0 deletions
diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig
new file mode 100644
index 000000000..78aed93c4
--- /dev/null
+++ b/hw/intc/Kconfig
@@ -0,0 +1,75 @@
+config HEATHROW_PIC
+ bool
+
+config I8259
+ bool
+ select ISA_BUS
+
+config PL190
+ bool
+
+config IOAPIC
+ bool
+ select I8259
+
+config ARM_GIC
+ bool
+ select MSI_NONBROKEN
+
+config OPENPIC
+ bool
+ select MSI_NONBROKEN
+
+config APIC
+ bool
+ select MSI_NONBROKEN
+ select I8259
+
+config ARM_GIC_KVM
+ bool
+ default y
+ depends on ARM_GIC && KVM
+
+config XICS
+ bool
+
+config XIVE
+ bool
+
+config ALLWINNER_A10_PIC
+ bool
+
+config S390_FLIC
+ bool
+
+config S390_FLIC_KVM
+ bool
+ default y
+ depends on S390_FLIC && KVM
+
+config OMPIC
+ bool
+
+config PPC_UIC
+ bool
+
+config SH_INTC
+ bool
+
+config RX_ICU
+ bool
+
+config LOONGSON_LIOINTC
+ bool
+
+config RISCV_ACLINT
+ bool
+
+config SIFIVE_PLIC
+ bool
+
+config GOLDFISH_PIC
+ bool
+
+config M68K_IRQC
+ bool
diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c
new file mode 100644
index 000000000..8cca12480
--- /dev/null
+++ b/hw/intc/allwinner-a10-pic.c
@@ -0,0 +1,215 @@
+/*
+ * Allwinner A10 interrupt controller device emulation
+ *
+ * Copyright (C) 2013 Li Guang
+ * Written by Li Guang <lig.fnst@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/intc/allwinner-a10-pic.h"
+#include "hw/irq.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+static void aw_a10_pic_update(AwA10PICState *s)
+{
+ uint8_t i;
+ int irq = 0, fiq = 0, zeroes;
+
+ s->vector = 0;
+
+ for (i = 0; i < AW_A10_PIC_REG_NUM; i++) {
+ irq |= s->irq_pending[i] & ~s->mask[i];
+ fiq |= s->select[i] & s->irq_pending[i] & ~s->mask[i];
+
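+ /* Latch the first (lowest-numbered) pending unmasked interrupt; the vector register holds that interrupt number scaled by 4. */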
+ if (!s->vector) {
+ zeroes = ctz32(s->irq_pending[i] & ~s->mask[i]);
+ if (zeroes != 32) {
+ s->vector = (i * 32 + zeroes) * 4;
+ }
+ }
+ }
+
+ qemu_set_irq(s->parent_irq, !!irq);
+ qemu_set_irq(s->parent_fiq, !!fiq);
+}
+
+static void aw_a10_pic_set_irq(void *opaque, int irq, int level)
+{
+ AwA10PICState *s = opaque;
+
+ if (level) {
+ set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]);
+ } else {
+ clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]);
+ }
+ aw_a10_pic_update(s);
+}
+
+static uint64_t aw_a10_pic_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AwA10PICState *s = opaque;
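+ /* Multi-word registers are banked as consecutive 32-bit words; offset bits [3:2] select the word within the group. */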
+ uint8_t index = (offset & 0xc) / 4;
+
+ switch (offset) {
+ case AW_A10_PIC_VECTOR:
+ return s->vector;
+ case AW_A10_PIC_BASE_ADDR:
+ return s->base_addr;
+ case AW_A10_PIC_PROTECT:
+ return s->protect;
+ case AW_A10_PIC_NMI:
+ return s->nmi;
+ case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8:
+ return s->irq_pending[index];
+ case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8:
+ return s->fiq_pending[index];
+ case AW_A10_PIC_SELECT ... AW_A10_PIC_SELECT + 8:
+ return s->select[index];
+ case AW_A10_PIC_ENABLE ... AW_A10_PIC_ENABLE + 8:
+ return s->enable[index];
+ case AW_A10_PIC_MASK ... AW_A10_PIC_MASK + 8:
+ return s->mask[index];
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%x\n", __func__, (int)offset);
+ break;
+ }
+
+ return 0;
+}
+
+static void aw_a10_pic_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ AwA10PICState *s = opaque;
+ uint8_t index = (offset & 0xc) / 4;
+
+ switch (offset) {
+ case AW_A10_PIC_BASE_ADDR:
+ s->base_addr = value & ~0x3;
+ break;
+ case AW_A10_PIC_PROTECT:
+ s->protect = value;
+ break;
+ case AW_A10_PIC_NMI:
+ s->nmi = value;
+ break;
+ case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8:
+ /*
+ * The register is read-only; nevertheless, Linux (including
+ * the version originally shipped by Allwinner) pretends to
+ * write to the register. Just ignore it.
+ */
+ break;
+ case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8:
+ s->fiq_pending[index] &= ~value;
+ break;
+ case AW_A10_PIC_SELECT ... AW_A10_PIC_SELECT + 8:
+ s->select[index] = value;
+ break;
+ case AW_A10_PIC_ENABLE ... AW_A10_PIC_ENABLE + 8:
+ s->enable[index] = value;
+ break;
+ case AW_A10_PIC_MASK ... AW_A10_PIC_MASK + 8:
+ s->mask[index] = value;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%x\n", __func__, (int)offset);
+ break;
+ }
+
+ aw_a10_pic_update(s);
+}
+
+static const MemoryRegionOps aw_a10_pic_ops = {
+ .read = aw_a10_pic_read,
+ .write = aw_a10_pic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_aw_a10_pic = {
+ .name = "a10.pic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(vector, AwA10PICState),
+ VMSTATE_UINT32(base_addr, AwA10PICState),
+ VMSTATE_UINT32(protect, AwA10PICState),
+ VMSTATE_UINT32(nmi, AwA10PICState),
+ VMSTATE_UINT32_ARRAY(irq_pending, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_UINT32_ARRAY(fiq_pending, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_UINT32_ARRAY(enable, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_UINT32_ARRAY(select, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_UINT32_ARRAY(mask, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void aw_a10_pic_init(Object *obj)
+{
+ AwA10PICState *s = AW_A10_PIC(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
+
+ qdev_init_gpio_in(DEVICE(dev), aw_a10_pic_set_irq, AW_A10_PIC_INT_NR);
+ sysbus_init_irq(dev, &s->parent_irq);
+ sysbus_init_irq(dev, &s->parent_fiq);
+ memory_region_init_io(&s->iomem, OBJECT(s), &aw_a10_pic_ops, s,
+ TYPE_AW_A10_PIC, 0x400);
+ sysbus_init_mmio(dev, &s->iomem);
+}
+
+static void aw_a10_pic_reset(DeviceState *d)
+{
+ AwA10PICState *s = AW_A10_PIC(d);
+ uint8_t i;
+
+ s->base_addr = 0;
+ s->protect = 0;
+ s->nmi = 0;
+ s->vector = 0;
+ for (i = 0; i < AW_A10_PIC_REG_NUM; i++) {
+ s->irq_pending[i] = 0;
+ s->fiq_pending[i] = 0;
+ s->select[i] = 0;
+ s->enable[i] = 0;
+ s->mask[i] = 0;
+ }
+}
+
+static void aw_a10_pic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = aw_a10_pic_reset;
+ dc->desc = "allwinner a10 pic";
+ dc->vmsd = &vmstate_aw_a10_pic;
+}
+
+static const TypeInfo aw_a10_pic_info = {
+ .name = TYPE_AW_A10_PIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AwA10PICState),
+ .instance_init = aw_a10_pic_init,
+ .class_init = aw_a10_pic_class_init,
+};
+
+static void aw_a10_register_types(void)
+{
+ type_register_static(&aw_a10_pic_info);
+}
+
+type_init(aw_a10_register_types);
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
new file mode 100644
index 000000000..3df11c34d
--- /dev/null
+++ b/hw/intc/apic.c
@@ -0,0 +1,928 @@
+/*
+ * APIC support
+ *
+ * Copyright (c) 2004-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+#include "qemu/osdep.h"
+#include "qemu/thread.h"
+#include "hw/i386/apic_internal.h"
+#include "hw/i386/apic.h"
+#include "hw/i386/ioapic.h"
+#include "hw/intc/i8259.h"
+#include "hw/pci/msi.h"
+#include "qemu/host-utils.h"
+#include "sysemu/kvm.h"
+#include "trace.h"
+#include "hw/i386/apic-msidef.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+
+#define MAX_APICS 255
+#define MAX_APIC_WORDS 8
+
+#define SYNC_FROM_VAPIC 0x1
+#define SYNC_TO_VAPIC 0x2
+#define SYNC_ISR_IRR_TO_VAPIC 0x4
+
+static APICCommonState *local_apics[MAX_APICS + 1];
+
+#define TYPE_APIC "apic"
+/* This is reusing the APICCommonState typedef from APIC_COMMON */
+DECLARE_INSTANCE_CHECKER(APICCommonState, APIC,
+ TYPE_APIC)
+
+static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
+static void apic_update_irq(APICCommonState *s);
+static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
+ uint8_t dest, uint8_t dest_mode);
+
+/* Find first bit starting from msb */
+static int apic_fls_bit(uint32_t value)
+{
+ return 31 - clz32(value);
+}
+
+/* Find first bit starting from lsb */
+static int apic_ffs_bit(uint32_t value)
+{
+ return ctz32(value);
+}
+
+static inline void apic_reset_bit(uint32_t *tab, int index)
+{
+ int i, mask;
+ i = index >> 5;
+ mask = 1 << (index & 0x1f);
+ tab[i] &= ~mask;
+}
+
+/* return -1 if no bit is set */
+static int get_highest_priority_int(uint32_t *tab)
+{
+ int i;
+ for (i = 7; i >= 0; i--) {
+ if (tab[i] != 0) {
+ return i * 32 + apic_fls_bit(tab[i]);
+ }
+ }
+ return -1;
+}
+
+static void apic_sync_vapic(APICCommonState *s, int sync_type)
+{
+ VAPICState vapic_state;
+ size_t length;
+ off_t start;
+ int vector;
+
+ if (!s->vapic_paddr) {
+ return;
+ }
+ if (sync_type & SYNC_FROM_VAPIC) {
+ cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
+ sizeof(vapic_state));
+ s->tpr = vapic_state.tpr;
+ }
+ if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
+ start = offsetof(VAPICState, isr);
+ length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);
+
+ if (sync_type & SYNC_TO_VAPIC) {
+ assert(qemu_cpu_is_self(CPU(s->cpu)));
+
+ vapic_state.tpr = s->tpr;
+ vapic_state.enabled = 1;
+ start = 0;
+ length = sizeof(VAPICState);
+ }
+
+ vector = get_highest_priority_int(s->isr);
+ if (vector < 0) {
+ vector = 0;
+ }
+ vapic_state.isr = vector & 0xf0;
+
+ vapic_state.zero = 0;
+
+ vector = get_highest_priority_int(s->irr);
+ if (vector < 0) {
+ vector = 0;
+ }
+ vapic_state.irr = vector & 0xff;
+
+ address_space_write_rom(&address_space_memory,
+ s->vapic_paddr + start,
+ MEMTXATTRS_UNSPECIFIED,
+ ((void *)&vapic_state) + start, length);
+ }
+}
+
+static void apic_vapic_base_update(APICCommonState *s)
+{
+ apic_sync_vapic(s, SYNC_TO_VAPIC);
+}
+
+static void apic_local_deliver(APICCommonState *s, int vector)
+{
+ uint32_t lvt = s->lvt[vector];
+ int trigger_mode;
+
+ trace_apic_local_deliver(vector, (lvt >> 8) & 7);
+
+ if (lvt & APIC_LVT_MASKED)
+ return;
+
+ switch ((lvt >> 8) & 7) {
+ case APIC_DM_SMI:
+ cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
+ break;
+
+ case APIC_DM_NMI:
+ cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
+ break;
+
+ case APIC_DM_EXTINT:
+ cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
+ break;
+
+ case APIC_DM_FIXED:
+ trigger_mode = APIC_TRIGGER_EDGE;
+ if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
+ (lvt & APIC_LVT_LEVEL_TRIGGER))
+ trigger_mode = APIC_TRIGGER_LEVEL;
+ apic_set_irq(s, lvt & 0xff, trigger_mode);
+ }
+}
+
+void apic_deliver_pic_intr(DeviceState *dev, int level)
+{
+ APICCommonState *s = APIC(dev);
+
+ if (level) {
+ apic_local_deliver(s, APIC_LVT_LINT0);
+ } else {
+ uint32_t lvt = s->lvt[APIC_LVT_LINT0];
+
+ switch ((lvt >> 8) & 7) {
+ case APIC_DM_FIXED:
+ if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
+ break;
+ apic_reset_bit(s->irr, lvt & 0xff);
+ /* fall through */
+ case APIC_DM_EXTINT:
+ apic_update_irq(s);
+ break;
+ }
+ }
+}
+
+static void apic_external_nmi(APICCommonState *s)
+{
+ apic_local_deliver(s, APIC_LVT_LINT1);
+}
+
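+/* Run 'code' once for every registered local APIC whose bit is set in deliver_bitmask, with 'apic' bound to that APIC. */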
+#define foreach_apic(apic, deliver_bitmask, code) \
+{\
+ int __i, __j;\
+ for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
+ uint32_t __mask = deliver_bitmask[__i];\
+ if (__mask) {\
+ for(__j = 0; __j < 32; __j++) {\
+ if (__mask & (1U << __j)) {\
+ apic = local_apics[__i * 32 + __j];\
+ if (apic) {\
+ code;\
+ }\
+ }\
+ }\
+ }\
+ }\
+}
+
+static void apic_bus_deliver(const uint32_t *deliver_bitmask,
+ uint8_t delivery_mode, uint8_t vector_num,
+ uint8_t trigger_mode)
+{
+ APICCommonState *apic_iter;
+
+ switch (delivery_mode) {
+ case APIC_DM_LOWPRI:
+ /* XXX: search for focus processor, arbitration */
+ {
+ int i, d;
+ d = -1;
+ for(i = 0; i < MAX_APIC_WORDS; i++) {
+ if (deliver_bitmask[i]) {
+ d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
+ break;
+ }
+ }
+ if (d >= 0) {
+ apic_iter = local_apics[d];
+ if (apic_iter) {
+ apic_set_irq(apic_iter, vector_num, trigger_mode);
+ }
+ }
+ }
+ return;
+
+ case APIC_DM_FIXED:
+ break;
+
+ case APIC_DM_SMI:
+ foreach_apic(apic_iter, deliver_bitmask,
+ cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
+ );
+ return;
+
+ case APIC_DM_NMI:
+ foreach_apic(apic_iter, deliver_bitmask,
+ cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
+ );
+ return;
+
+ case APIC_DM_INIT:
+ /* normal INIT IPI sent to processors */
+ foreach_apic(apic_iter, deliver_bitmask,
+ cpu_interrupt(CPU(apic_iter->cpu),
+ CPU_INTERRUPT_INIT)
+ );
+ return;
+
+ case APIC_DM_EXTINT:
+ /* handled in I/O APIC code */
+ break;
+
+ default:
+ return;
+ }
+
+ foreach_apic(apic_iter, deliver_bitmask,
+ apic_set_irq(apic_iter, vector_num, trigger_mode) );
+}
+
+void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
+ uint8_t vector_num, uint8_t trigger_mode)
+{
+ uint32_t deliver_bitmask[MAX_APIC_WORDS];
+
+ trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
+ trigger_mode);
+
+ apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
+ apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
+}
+
+static void apic_set_base(APICCommonState *s, uint64_t val)
+{
+ s->apicbase = (val & 0xfffff000) |
+ (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
+ /* if disabled, cannot be enabled again */
+ if (!(val & MSR_IA32_APICBASE_ENABLE)) {
+ s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
+ cpu_clear_apic_feature(&s->cpu->env);
+ s->spurious_vec &= ~APIC_SV_ENABLE;
+ }
+}
+
+static void apic_set_tpr(APICCommonState *s, uint8_t val)
+{
+ /* Updates from cr8 are ignored while the VAPIC is active */
+ if (!s->vapic_paddr) {
+ s->tpr = val << 4;
+ apic_update_irq(s);
+ }
+}
+
+int apic_get_highest_priority_irr(DeviceState *dev)
+{
+ APICCommonState *s;
+
+ if (!dev) {
+ /* no interrupts */
+ return -1;
+ }
+ s = APIC_COMMON(dev);
+ return get_highest_priority_int(s->irr);
+}
+
+static uint8_t apic_get_tpr(APICCommonState *s)
+{
+ apic_sync_vapic(s, SYNC_FROM_VAPIC);
+ return s->tpr >> 4;
+}
+
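+/* The processor priority is the TPR when its class is at least that of the highest in-service vector, otherwise that vector's priority class. */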
+int apic_get_ppr(APICCommonState *s)
+{
+ int tpr, isrv, ppr;
+
+ tpr = (s->tpr >> 4);
+ isrv = get_highest_priority_int(s->isr);
+ if (isrv < 0)
+ isrv = 0;
+ isrv >>= 4;
+ if (tpr >= isrv)
+ ppr = s->tpr;
+ else
+ ppr = isrv << 4;
+ return ppr;
+}
+
+static int apic_get_arb_pri(APICCommonState *s)
+{
+ /* XXX: arbitration */
+ return 0;
+}
+
+
+/*
+ * <0 - low prio interrupt,
+ * 0 - no interrupt,
+ * >0 - interrupt number
+ */
+static int apic_irq_pending(APICCommonState *s)
+{
+ int irrv, ppr;
+
+ if (!(s->spurious_vec & APIC_SV_ENABLE)) {
+ return 0;
+ }
+
+ irrv = get_highest_priority_int(s->irr);
+ if (irrv < 0) {
+ return 0;
+ }
+ ppr = apic_get_ppr(s);
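+ /* A pending vector whose priority class does not exceed the processor priority is blocked for now; report it as a low-prio interrupt. */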
+ if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
+ return -1;
+ }
+
+ return irrv;
+}
+
+/* signal the CPU if an irq is pending */
+static void apic_update_irq(APICCommonState *s)
+{
+ CPUState *cpu;
+ DeviceState *dev = (DeviceState *)s;
+
+ cpu = CPU(s->cpu);
+ if (!qemu_cpu_is_self(cpu)) {
+ cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
+ } else if (apic_irq_pending(s) > 0) {
+ cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
+ } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
+ }
+}
+
+void apic_poll_irq(DeviceState *dev)
+{
+ APICCommonState *s = APIC(dev);
+
+ apic_sync_vapic(s, SYNC_FROM_VAPIC);
+ apic_update_irq(s);
+}
+
+static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
+{
+ apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num));
+
+ apic_set_bit(s->irr, vector_num);
+ if (trigger_mode)
+ apic_set_bit(s->tmr, vector_num);
+ else
+ apic_reset_bit(s->tmr, vector_num);
+ if (s->vapic_paddr) {
+ apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
+ /*
+ * The vcpu thread needs to see the new IRR before we pull its current
+ * TPR value. That way, if we miss a lowering of the TPR, the guest
+ * has the chance to notice the new IRR and poll for IRQs on its own.
+ */
+ smp_wmb();
+ apic_sync_vapic(s, SYNC_FROM_VAPIC);
+ }
+ apic_update_irq(s);
+}
+
+static void apic_eoi(APICCommonState *s)
+{
+ int isrv;
+ isrv = get_highest_priority_int(s->isr);
+ if (isrv < 0)
+ return;
+ apic_reset_bit(s->isr, isrv);
+ if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) {
+ ioapic_eoi_broadcast(isrv);
+ }
+ apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
+ apic_update_irq(s);
+}
+
+static int apic_find_dest(uint8_t dest)
+{
+ APICCommonState *apic = local_apics[dest];
+ int i;
+
+ if (apic && apic->id == dest)
+ return dest; /* shortcut in case apic->id == local_apics[dest]->id */
+
+ for (i = 0; i < MAX_APICS; i++) {
+ apic = local_apics[i];
+ if (apic && apic->id == dest)
+ return i;
+ if (!apic)
+ break;
+ }
+
+ return -1;
+}
+
+static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
+ uint8_t dest, uint8_t dest_mode)
+{
+ APICCommonState *apic_iter;
+ int i;
+
+ if (dest_mode == 0) {
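+ /* Physical destination mode: 0xff is broadcast, anything else matches a single APIC ID. */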
+ if (dest == 0xff) {
+ memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
+ } else {
+ int idx = apic_find_dest(dest);
+ memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
+ if (idx >= 0)
+ apic_set_bit(deliver_bitmask, idx);
+ }
+ } else {
+ /* XXX: cluster mode */
+ memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
+ for(i = 0; i < MAX_APICS; i++) {
+ apic_iter = local_apics[i];
+ if (apic_iter) {
+ if (apic_iter->dest_mode == 0xf) {
+ if (dest & apic_iter->log_dest)
+ apic_set_bit(deliver_bitmask, i);
+ } else if (apic_iter->dest_mode == 0x0) {
+ if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
+ (dest & apic_iter->log_dest & 0x0f)) {
+ apic_set_bit(deliver_bitmask, i);
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ }
+}
+
+static void apic_startup(APICCommonState *s, int vector_num)
+{
+ s->sipi_vector = vector_num;
+ cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
+}
+
+void apic_sipi(DeviceState *dev)
+{
+ APICCommonState *s = APIC(dev);
+
+ cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
+
+ if (!s->wait_for_sipi)
+ return;
+ cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
+ s->wait_for_sipi = 0;
+}
+
+static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
+ uint8_t delivery_mode, uint8_t vector_num,
+ uint8_t trigger_mode)
+{
+ APICCommonState *s = APIC(dev);
+ uint32_t deliver_bitmask[MAX_APIC_WORDS];
+ int dest_shorthand = (s->icr[0] >> 18) & 3;
+ APICCommonState *apic_iter;
+
+ switch (dest_shorthand) {
+ case 0:
+ apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
+ break;
+ case 1:
+ memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
+ apic_set_bit(deliver_bitmask, s->id);
+ break;
+ case 2:
+ memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
+ break;
+ case 3:
+ memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
+ apic_reset_bit(deliver_bitmask, s->id);
+ break;
+ }
+
+ switch (delivery_mode) {
+ case APIC_DM_INIT:
+ {
+ int trig_mode = (s->icr[0] >> 15) & 1;
+ int level = (s->icr[0] >> 14) & 1;
+ if (level == 0 && trig_mode == 1) {
+ foreach_apic(apic_iter, deliver_bitmask,
+ apic_iter->arb_id = apic_iter->id );
+ return;
+ }
+ }
+ break;
+
+ case APIC_DM_SIPI:
+ foreach_apic(apic_iter, deliver_bitmask,
+ apic_startup(apic_iter, vector_num) );
+ return;
+ }
+
+ apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
+}
+
+static bool apic_check_pic(APICCommonState *s)
+{
+ DeviceState *dev = (DeviceState *)s;
+
+ if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
+ return false;
+ }
+ apic_deliver_pic_intr(dev, 1);
+ return true;
+}
+
+int apic_get_interrupt(DeviceState *dev)
+{
+ APICCommonState *s = APIC(dev);
+ int intno;
+
+ /* if the APIC is not installed or not enabled, we let the 8259 handle the
+ IRQs */
+ if (!s)
+ return -1;
+ if (!(s->spurious_vec & APIC_SV_ENABLE))
+ return -1;
+
+ apic_sync_vapic(s, SYNC_FROM_VAPIC);
+ intno = apic_irq_pending(s);
+
+ /* if there is an interrupt from the 8259, let the caller handle
+ * that first since ExtINT interrupts ignore the priority.
+ */
+ if (intno == 0 || apic_check_pic(s)) {
+ apic_sync_vapic(s, SYNC_TO_VAPIC);
+ return -1;
+ } else if (intno < 0) {
+ apic_sync_vapic(s, SYNC_TO_VAPIC);
+ return s->spurious_vec & 0xff;
+ }
+ apic_reset_bit(s->irr, intno);
+ apic_set_bit(s->isr, intno);
+ apic_sync_vapic(s, SYNC_TO_VAPIC);
+
+ apic_update_irq(s);
+
+ return intno;
+}
+
+int apic_accept_pic_intr(DeviceState *dev)
+{
+ APICCommonState *s = APIC(dev);
+ uint32_t lvt0;
+
+ if (!s)
+ return -1;
+
+ lvt0 = s->lvt[APIC_LVT_LINT0];
+
+ if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
+ (lvt0 & APIC_LVT_MASKED) == 0)
+ return isa_pic != NULL;
+
+ return 0;
+}
+
+static void apic_timer_update(APICCommonState *s, int64_t current_time)
+{
+ if (apic_next_timer(s, current_time)) {
+ timer_mod(s->timer, s->next_time);
+ } else {
+ timer_del(s->timer);
+ }
+}
+
+static void apic_timer(void *opaque)
+{
+ APICCommonState *s = opaque;
+
+ apic_local_deliver(s, APIC_LVT_TIMER);
+ apic_timer_update(s, s->next_time);
+}
+
+static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
+{
+ DeviceState *dev;
+ APICCommonState *s;
+ uint32_t val;
+ int index;
+
+ if (size < 4) {
+ return 0;
+ }
+
+ dev = cpu_get_current_apic();
+ if (!dev) {
+ return 0;
+ }
+ s = APIC(dev);
+
+ index = (addr >> 4) & 0xff;
+ switch(index) {
+ case 0x02: /* id */
+ val = s->id << 24;
+ break;
+ case 0x03: /* version */
+ val = s->version | ((APIC_LVT_NB - 1) << 16);
+ break;
+ case 0x08:
+ apic_sync_vapic(s, SYNC_FROM_VAPIC);
+ if (apic_report_tpr_access) {
+ cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
+ }
+ val = s->tpr;
+ break;
+ case 0x09:
+ val = apic_get_arb_pri(s);
+ break;
+ case 0x0a:
+ /* ppr */
+ val = apic_get_ppr(s);
+ break;
+ case 0x0b:
+ val = 0;
+ break;
+ case 0x0d:
+ val = s->log_dest << 24;
+ break;
+ case 0x0e:
+ val = (s->dest_mode << 28) | 0xfffffff;
+ break;
+ case 0x0f:
+ val = s->spurious_vec;
+ break;
+ case 0x10 ... 0x17:
+ val = s->isr[index & 7];
+ break;
+ case 0x18 ... 0x1f:
+ val = s->tmr[index & 7];
+ break;
+ case 0x20 ... 0x27:
+ val = s->irr[index & 7];
+ break;
+ case 0x28:
+ val = s->esr;
+ break;
+ case 0x30:
+ case 0x31:
+ val = s->icr[index & 1];
+ break;
+ case 0x32 ... 0x37:
+ val = s->lvt[index - 0x32];
+ break;
+ case 0x38:
+ val = s->initial_count;
+ break;
+ case 0x39:
+ val = apic_get_current_count(s);
+ break;
+ case 0x3e:
+ val = s->divide_conf;
+ break;
+ default:
+ s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
+ val = 0;
+ break;
+ }
+ trace_apic_mem_readl(addr, val);
+ return val;
+}
+
+static void apic_send_msi(MSIMessage *msi)
+{
+ uint64_t addr = msi->address;
+ uint32_t data = msi->data;
+ uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+ uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+ uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
+ uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+ uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
+ /* XXX: Ignore redirection hint. */
+ apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
+}
+
+static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ DeviceState *dev;
+ APICCommonState *s;
+ int index = (addr >> 4) & 0xff;
+
+ if (size < 4) {
+ return;
+ }
+
+ if (addr > 0xfff || !index) {
+ /* MSI and MMIO APIC are at the same memory location,
+ * but actually not on the global bus: MSI is on PCI bus
+ * APIC is connected directly to the CPU.
+ * Mapping them on the global bus happens to work because
+ * MSI registers are reserved in APIC MMIO and vice versa. */
+ MSIMessage msi = { .address = addr, .data = val };
+ apic_send_msi(&msi);
+ return;
+ }
+
+ dev = cpu_get_current_apic();
+ if (!dev) {
+ return;
+ }
+ s = APIC(dev);
+
+ trace_apic_mem_writel(addr, val);
+
+ switch(index) {
+ case 0x02:
+ s->id = (val >> 24);
+ break;
+ case 0x03:
+ break;
+ case 0x08:
+ if (apic_report_tpr_access) {
+ cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
+ }
+ s->tpr = val;
+ apic_sync_vapic(s, SYNC_TO_VAPIC);
+ apic_update_irq(s);
+ break;
+ case 0x09:
+ case 0x0a:
+ break;
+ case 0x0b: /* EOI */
+ apic_eoi(s);
+ break;
+ case 0x0d:
+ s->log_dest = val >> 24;
+ break;
+ case 0x0e:
+ s->dest_mode = val >> 28;
+ break;
+ case 0x0f:
+ s->spurious_vec = val & 0x1ff;
+ apic_update_irq(s);
+ break;
+ case 0x10 ... 0x17:
+ case 0x18 ... 0x1f:
+ case 0x20 ... 0x27:
+ case 0x28:
+ break;
+ case 0x30:
+ s->icr[0] = val;
+ apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
+ (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
+ (s->icr[0] >> 15) & 1);
+ break;
+ case 0x31:
+ s->icr[1] = val;
+ break;
+ case 0x32 ... 0x37:
+ {
+ int n = index - 0x32;
+ s->lvt[n] = val;
+ if (n == APIC_LVT_TIMER) {
+ apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+ } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
+ apic_update_irq(s);
+ }
+ }
+ break;
+ case 0x38:
+ s->initial_count = val;
+ s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ apic_timer_update(s, s->initial_count_load_time);
+ break;
+ case 0x39:
+ break;
+ case 0x3e:
+ {
+ int v;
+ s->divide_conf = val & 0xb;
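+ /* DCR bits [1:0] and [3] form a 3-bit value v; the timer clock is divided by 2^(v+1), with v == 7 wrapping to a shift of 0 (divide by 1). */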
+ v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
+ s->count_shift = (v + 1) & 7;
+ }
+ break;
+ default:
+ s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
+ break;
+ }
+}
+
+static void apic_pre_save(APICCommonState *s)
+{
+ apic_sync_vapic(s, SYNC_FROM_VAPIC);
+}
+
+static void apic_post_load(APICCommonState *s)
+{
+ if (s->timer_expiry != -1) {
+ timer_mod(s->timer, s->timer_expiry);
+ } else {
+ timer_del(s->timer);
+ }
+}
+
+static const MemoryRegionOps apic_io_ops = {
+ .read = apic_mem_read,
+ .write = apic_mem_write,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 4,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void apic_realize(DeviceState *dev, Error **errp)
+{
+ APICCommonState *s = APIC(dev);
+
+ if (s->id >= MAX_APICS) {
+ error_setg(errp, "%s initialization failed. APIC ID %d is invalid",
+ object_get_typename(OBJECT(dev)), s->id);
+ return;
+ }
+
+ if (kvm_enabled()) {
+ warn_report("Userspace local APIC is deprecated for KVM.");
+ warn_report("Do not use kernel-irqchip except for the -M isapc machine type.");
+ }
+
+ memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
+ APIC_SPACE_SIZE);
+
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
+ local_apics[s->id] = s;
+
+ msi_nonbroken = true;
+}
+
+static void apic_unrealize(DeviceState *dev)
+{
+ APICCommonState *s = APIC(dev);
+
+ timer_free(s->timer);
+ local_apics[s->id] = NULL;
+}
+
+static void apic_class_init(ObjectClass *klass, void *data)
+{
+ APICCommonClass *k = APIC_COMMON_CLASS(klass);
+
+ k->realize = apic_realize;
+ k->unrealize = apic_unrealize;
+ k->set_base = apic_set_base;
+ k->set_tpr = apic_set_tpr;
+ k->get_tpr = apic_get_tpr;
+ k->vapic_base_update = apic_vapic_base_update;
+ k->external_nmi = apic_external_nmi;
+ k->pre_save = apic_pre_save;
+ k->post_load = apic_post_load;
+ k->send_msi = apic_send_msi;
+}
+
+static const TypeInfo apic_info = {
+ .name = TYPE_APIC,
+ .instance_size = sizeof(APICCommonState),
+ .parent = TYPE_APIC_COMMON,
+ .class_init = apic_class_init,
+};
+
+static void apic_register_types(void)
+{
+ type_register_static(&apic_info);
+}
+
+type_init(apic_register_types)
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
new file mode 100644
index 000000000..2a2098206
--- /dev/null
+++ b/hw/intc/apic_common.c
@@ -0,0 +1,497 @@
+/*
+ * APIC support - common bits of emulated and KVM kernel model
+ *
+ * Copyright (c) 2004-2005 Fabrice Bellard
+ * Copyright (c) 2011 Jan Kiszka, Siemens AG
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "hw/i386/apic.h"
+#include "hw/i386/apic_internal.h"
+#include "trace.h"
+#include "hw/boards.h"
+#include "sysemu/hax.h"
+#include "sysemu/kvm.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+
+static int apic_irq_delivered;
+bool apic_report_tpr_access;
+
+void cpu_set_apic_base(DeviceState *dev, uint64_t val)
+{
+ trace_cpu_set_apic_base(val);
+
+ if (dev) {
+ APICCommonState *s = APIC_COMMON(dev);
+ APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+ /* switching to x2APIC, reset possibly modified xAPIC ID */
+ if (!(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
+ (val & MSR_IA32_APICBASE_EXTD)) {
+ s->id = s->initial_apic_id;
+ }
+ info->set_base(s, val);
+ }
+}
+
+uint64_t cpu_get_apic_base(DeviceState *dev)
+{
+ if (dev) {
+ APICCommonState *s = APIC_COMMON(dev);
+ trace_cpu_get_apic_base((uint64_t)s->apicbase);
+ return s->apicbase;
+ } else {
+ trace_cpu_get_apic_base(MSR_IA32_APICBASE_BSP);
+ return MSR_IA32_APICBASE_BSP;
+ }
+}
+
+void cpu_set_apic_tpr(DeviceState *dev, uint8_t val)
+{
+ APICCommonState *s;
+ APICCommonClass *info;
+
+ if (!dev) {
+ return;
+ }
+
+ s = APIC_COMMON(dev);
+ info = APIC_COMMON_GET_CLASS(s);
+
+ info->set_tpr(s, val);
+}
+
+uint8_t cpu_get_apic_tpr(DeviceState *dev)
+{
+ APICCommonState *s;
+ APICCommonClass *info;
+
+ if (!dev) {
+ return 0;
+ }
+
+ s = APIC_COMMON(dev);
+ info = APIC_COMMON_GET_CLASS(s);
+
+ return info->get_tpr(s);
+}
+
+void apic_enable_tpr_access_reporting(DeviceState *dev, bool enable)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+ APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+
+ apic_report_tpr_access = enable;
+ if (info->enable_tpr_reporting) {
+ info->enable_tpr_reporting(s, enable);
+ }
+}
+
+void apic_enable_vapic(DeviceState *dev, hwaddr paddr)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+ APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+
+ s->vapic_paddr = paddr;
+ info->vapic_base_update(s);
+}
+
+void apic_handle_tpr_access_report(DeviceState *dev, target_ulong ip,
+ TPRAccess access)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+
+ vapic_report_tpr_access(s->vapic, CPU(s->cpu), ip, access);
+}
+
+void apic_report_irq_delivered(int delivered)
+{
+ apic_irq_delivered += delivered;
+
+ trace_apic_report_irq_delivered(apic_irq_delivered);
+}
+
+void apic_reset_irq_delivered(void)
+{
+ /* Copy this into a local variable to encourage gcc to emit a plain
+ * register for a sys/sdt.h marker. For details on this workaround, see:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=13296
+ */
+ volatile int a_i_d = apic_irq_delivered;
+ trace_apic_reset_irq_delivered(a_i_d);
+
+ apic_irq_delivered = 0;
+}
+
+int apic_get_irq_delivered(void)
+{
+ trace_apic_get_irq_delivered(apic_irq_delivered);
+
+ return apic_irq_delivered;
+}
+
+void apic_deliver_nmi(DeviceState *dev)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+ APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+
+ info->external_nmi(s);
+}
+
+bool apic_next_timer(APICCommonState *s, int64_t current_time)
+{
+ int64_t d;
+
+ /* We need to store the timer state separately to support APIC
+ * implementations that maintain a non-QEMU timer, e.g. inside the
+ * host kernel. This open-coded state allows us to migrate between
+ * both models. */
+ s->timer_expiry = -1;
+
+ if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_MASKED) {
+ return false;
+ }
+
+ d = (current_time - s->initial_count_load_time) >> s->count_shift;
+
+ if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
+ if (!s->initial_count) {
+ return false;
+ }
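+ /* Round d up to the next multiple of (initial_count + 1): the next periodic expiry, measured in divided timer ticks since the initial load. */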
+ d = ((d / ((uint64_t)s->initial_count + 1)) + 1) *
+ ((uint64_t)s->initial_count + 1);
+ } else {
+ if (d >= s->initial_count) {
+ return false;
+ }
+ d = (uint64_t)s->initial_count + 1;
+ }
+ s->next_time = s->initial_count_load_time + (d << s->count_shift);
+ s->timer_expiry = s->next_time;
+ return true;
+}
+
+uint32_t apic_get_current_count(APICCommonState *s)
+{
+ int64_t d;
+ uint32_t val;
+ d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
+ s->count_shift;
+ if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
+ /* periodic */
+ val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
+ } else {
+ if (d >= s->initial_count) {
+ val = 0;
+ } else {
+ val = s->initial_count - d;
+ }
+ }
+ return val;
+}
+
+void apic_init_reset(DeviceState *dev)
+{
+ APICCommonState *s;
+ APICCommonClass *info;
+ int i;
+
+ if (!dev) {
+ return;
+ }
+ s = APIC_COMMON(dev);
+ s->tpr = 0;
+ s->spurious_vec = 0xff;
+ s->log_dest = 0;
+ s->dest_mode = 0xf;
+ memset(s->isr, 0, sizeof(s->isr));
+ memset(s->tmr, 0, sizeof(s->tmr));
+ memset(s->irr, 0, sizeof(s->irr));
+ for (i = 0; i < APIC_LVT_NB; i++) {
+ s->lvt[i] = APIC_LVT_MASKED;
+ }
+ s->esr = 0;
+ memset(s->icr, 0, sizeof(s->icr));
+ s->divide_conf = 0;
+ s->count_shift = 0;
+ s->initial_count = 0;
+ s->initial_count_load_time = 0;
+ s->next_time = 0;
+ s->wait_for_sipi = !cpu_is_bsp(s->cpu);
+
+ if (s->timer) {
+ timer_del(s->timer);
+ }
+ s->timer_expiry = -1;
+
+ info = APIC_COMMON_GET_CLASS(s);
+ if (info->reset) {
+ info->reset(s);
+ }
+}
+
+void apic_designate_bsp(DeviceState *dev, bool bsp)
+{
+ if (dev == NULL) {
+ return;
+ }
+
+ APICCommonState *s = APIC_COMMON(dev);
+ if (bsp) {
+ s->apicbase |= MSR_IA32_APICBASE_BSP;
+ } else {
+ s->apicbase &= ~MSR_IA32_APICBASE_BSP;
+ }
+}
+
+static void apic_reset_common(DeviceState *dev)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+ APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+ uint32_t bsp;
+
+ bsp = s->apicbase & MSR_IA32_APICBASE_BSP;
+ s->apicbase = APIC_DEFAULT_ADDRESS | bsp | MSR_IA32_APICBASE_ENABLE;
+ s->id = s->initial_apic_id;
+
+ apic_reset_irq_delivered();
+
+ s->vapic_paddr = 0;
+ info->vapic_base_update(s);
+
+ apic_init_reset(dev);
+}
+
+static const VMStateDescription vmstate_apic_common;
+
+static void apic_common_realize(DeviceState *dev, Error **errp)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+ APICCommonClass *info;
+ static DeviceState *vapic;
+ uint32_t instance_id = s->initial_apic_id;
+
+ /* Normally initial APIC ID should be no more than hundreds */
+ assert(instance_id != VMSTATE_INSTANCE_ID_ANY);
+
+ info = APIC_COMMON_GET_CLASS(s);
+ info->realize(dev, errp);
+
+ /* Note: We need at least 1M to map the VAPIC option ROM */
+ if (!vapic && s->vapic_control & VAPIC_ENABLE_MASK &&
+ !hax_enabled() && current_machine->ram_size >= 1024 * 1024) {
+ vapic = sysbus_create_simple("kvmvapic", -1, NULL);
+ }
+ s->vapic = vapic;
+ if (apic_report_tpr_access && info->enable_tpr_reporting) {
+ info->enable_tpr_reporting(s, true);
+ }
+
+ if (s->legacy_instance_id) {
+ instance_id = VMSTATE_INSTANCE_ID_ANY;
+ }
+ vmstate_register_with_alias_id(NULL, instance_id, &vmstate_apic_common,
+ s, -1, 0, NULL);
+}
+
+static void apic_common_unrealize(DeviceState *dev)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+ APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+
+ vmstate_unregister(NULL, &vmstate_apic_common, s);
+ info->unrealize(dev);
+
+ if (apic_report_tpr_access && info->enable_tpr_reporting) {
+ info->enable_tpr_reporting(s, false);
+ }
+}
+
+static int apic_pre_load(void *opaque)
+{
+ APICCommonState *s = APIC_COMMON(opaque);
+
+ /* The default is !cpu_is_bsp(s->cpu), but the common value is 0
+ * so that's what apic_common_sipi_needed checks for. Reset to
+ * the value that is assumed when the apic_sipi subsection is
+ * absent.
+ */
+ s->wait_for_sipi = 0;
+ return 0;
+}
+
+static int apic_dispatch_pre_save(void *opaque)
+{
+ APICCommonState *s = APIC_COMMON(opaque);
+ APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+
+ if (info->pre_save) {
+ info->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int apic_dispatch_post_load(void *opaque, int version_id)
+{
+ APICCommonState *s = APIC_COMMON(opaque);
+ APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+
+ if (info->post_load) {
+ info->post_load(s);
+ }
+ return 0;
+}
+
+static bool apic_common_sipi_needed(void *opaque)
+{
+ APICCommonState *s = APIC_COMMON(opaque);
+ return s->wait_for_sipi != 0;
+}
+
+static const VMStateDescription vmstate_apic_common_sipi = {
+ .name = "apic_sipi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = apic_common_sipi_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(sipi_vector, APICCommonState),
+ VMSTATE_INT32(wait_for_sipi, APICCommonState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_apic_common = {
+ .name = "apic",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .pre_load = apic_pre_load,
+ .pre_save = apic_dispatch_pre_save,
+ .post_load = apic_dispatch_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(apicbase, APICCommonState),
+ VMSTATE_UINT8(id, APICCommonState),
+ VMSTATE_UINT8(arb_id, APICCommonState),
+ VMSTATE_UINT8(tpr, APICCommonState),
+ VMSTATE_UINT32(spurious_vec, APICCommonState),
+ VMSTATE_UINT8(log_dest, APICCommonState),
+ VMSTATE_UINT8(dest_mode, APICCommonState),
+ VMSTATE_UINT32_ARRAY(isr, APICCommonState, 8),
+ VMSTATE_UINT32_ARRAY(tmr, APICCommonState, 8),
+ VMSTATE_UINT32_ARRAY(irr, APICCommonState, 8),
+ VMSTATE_UINT32_ARRAY(lvt, APICCommonState, APIC_LVT_NB),
+ VMSTATE_UINT32(esr, APICCommonState),
+ VMSTATE_UINT32_ARRAY(icr, APICCommonState, 2),
+ VMSTATE_UINT32(divide_conf, APICCommonState),
+ VMSTATE_INT32(count_shift, APICCommonState),
+ VMSTATE_UINT32(initial_count, APICCommonState),
+ VMSTATE_INT64(initial_count_load_time, APICCommonState),
+ VMSTATE_INT64(next_time, APICCommonState),
+ VMSTATE_INT64(timer_expiry,
+ APICCommonState), /* open-coded timer state */
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_apic_common_sipi,
+ NULL
+ }
+};
+
+static Property apic_properties_common[] = {
+ DEFINE_PROP_UINT8("version", APICCommonState, version, 0x14),
+ DEFINE_PROP_BIT("vapic", APICCommonState, vapic_control, VAPIC_ENABLE_BIT,
+ true),
+ DEFINE_PROP_BOOL("legacy-instance-id", APICCommonState, legacy_instance_id,
+ false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void apic_common_get_id(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ APICCommonState *s = APIC_COMMON(obj);
+ uint32_t value;
+
+ value = s->apicbase & MSR_IA32_APICBASE_EXTD ? s->initial_apic_id : s->id;
+ visit_type_uint32(v, name, &value, errp);
+}
+
+static void apic_common_set_id(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ APICCommonState *s = APIC_COMMON(obj);
+ DeviceState *dev = DEVICE(obj);
+ uint32_t value;
+
+ if (dev->realized) {
+ qdev_prop_set_after_realize(dev, name, errp);
+ return;
+ }
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+ s->initial_apic_id = value;
+ s->id = (uint8_t)value;
+}
+
+static void apic_common_initfn(Object *obj)
+{
+ APICCommonState *s = APIC_COMMON(obj);
+
+ s->id = s->initial_apic_id = -1;
+ object_property_add(obj, "id", "uint32",
+ apic_common_get_id,
+ apic_common_set_id, NULL, NULL);
+}
+
+static void apic_common_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = apic_reset_common;
+ device_class_set_props(dc, apic_properties_common);
+ dc->realize = apic_common_realize;
+ dc->unrealize = apic_common_unrealize;
+ /*
+ * Reason: APIC and CPU need to be wired up by
+ * x86_cpu_apic_create()
+ */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo apic_common_type = {
+ .name = TYPE_APIC_COMMON,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(APICCommonState),
+ .instance_init = apic_common_initfn,
+ .class_size = sizeof(APICCommonClass),
+ .class_init = apic_common_class_init,
+ .abstract = true,
+};
+
+static void apic_common_register_types(void)
+{
+ type_register_static(&apic_common_type);
+}
+
+type_init(apic_common_register_types)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
new file mode 100644
index 000000000..a994b1f02
--- /dev/null
+++ b/hw/intc/arm_gic.c
@@ -0,0 +1,2146 @@
+/*
+ * ARM Generic/Distributed Interrupt Controller
+ *
+ * Copyright (c) 2006-2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+/* This file contains implementation code for the RealView EB interrupt
+ * controller, MPCore distributed interrupt controller and ARMv7-M
+ * Nested Vectored Interrupt Controller.
+ * It is compiled in two ways:
+ * (1) as a standalone file to produce a sysbus device which is a GIC
+ * that can be used on the realview board and as one of the builtin
+ * private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
+ * (2) by being directly #included into armv7m_nvic.c to produce the
+ * armv7m_nvic device.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "gic_internal.h"
+#include "qapi/error.h"
+#include "hw/core/cpu.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include "sysemu/kvm.h"
+#include "sysemu/qtest.h"
+
+/* #define DEBUG_GIC */
+
+#ifdef DEBUG_GIC
+#define DEBUG_GIC_GATE 1
+#else
+#define DEBUG_GIC_GATE 0
+#endif
+
+#define DPRINTF(fmt, ...) do { \
+ if (DEBUG_GIC_GATE) { \
+ fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+static const uint8_t gic_id_11mpcore[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
+};
+
+static const uint8_t gic_id_gicv1[] = {
+ 0x04, 0x00, 0x00, 0x00, 0x90, 0xb3, 0x1b, 0x00, 0x0d, 0xf0, 0x05, 0xb1
+};
+
+static const uint8_t gic_id_gicv2[] = {
+ 0x04, 0x00, 0x00, 0x00, 0x90, 0xb4, 0x2b, 0x00, 0x0d, 0xf0, 0x05, 0xb1
+};
+
+static inline int gic_get_current_cpu(GICState *s)
+{
+ if (!qtest_enabled() && s->num_cpu > 1) {
+ return current_cpu->cpu_index;
+ }
+ return 0;
+}
+
+static inline int gic_get_current_vcpu(GICState *s)
+{
+ return gic_get_current_cpu(s) + GIC_NCPU;
+}
+
+/* Return true if this GIC config has interrupt groups, which is
+ * true if we're a GICv2, or a GICv1 with the security extensions.
+ */
+static inline bool gic_has_groups(GICState *s)
+{
+ return s->revision == 2 || s->security_extn;
+}
+
+static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs)
+{
+ return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure;
+}
+
+static inline void gic_get_best_irq(GICState *s, int cpu,
+ int *best_irq, int *best_prio, int *group)
+{
+ int irq;
+ int cm = 1 << cpu;
+
+ *best_irq = 1023;
+ *best_prio = 0x100;
+
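+ /* Pick the highest-priority (numerically lowest) interrupt that is enabled, pending, not already active, and either private to or targeted at this CPU. */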
+ for (irq = 0; irq < s->num_irq; irq++) {
+ if (GIC_DIST_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
+ (!GIC_DIST_TEST_ACTIVE(irq, cm)) &&
+ (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) {
+ if (GIC_DIST_GET_PRIORITY(irq, cpu) < *best_prio) {
+ *best_prio = GIC_DIST_GET_PRIORITY(irq, cpu);
+ *best_irq = irq;
+ }
+ }
+ }
+
+ if (*best_irq < 1023) {
+ *group = GIC_DIST_TEST_GROUP(*best_irq, cm);
+ }
+}
+
+static inline void gic_get_best_virq(GICState *s, int cpu,
+ int *best_irq, int *best_prio, int *group)
+{
+ int lr_idx = 0;
+
+ *best_irq = 1023;
+ *best_prio = 0x100;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t lr_entry = s->h_lr[lr_idx][cpu];
+ int state = GICH_LR_STATE(lr_entry);
+
+ if (state == GICH_LR_STATE_PENDING) {
+ int prio = GICH_LR_PRIORITY(lr_entry);
+
+ if (prio < *best_prio) {
+ *best_prio = prio;
+ *best_irq = GICH_LR_VIRT_ID(lr_entry);
+ *group = GICH_LR_GROUP(lr_entry);
+ }
+ }
+ }
+}
+
+/* Return true if IRQ signaling is enabled for the given cpu and at least one
+ * of the given groups:
+ * - in the non-virt case, the distributor must be enabled for one of the
+ * given groups
+ * - in the virt case, the virtual interface must be enabled.
+ * - in all cases, the (v)CPU interface must be enabled for one of the given
+ * groups.
+ */
+static inline bool gic_irq_signaling_enabled(GICState *s, int cpu, bool virt,
+ int group_mask)
+{
+ int cpu_iface = virt ? (cpu + GIC_NCPU) : cpu;
+
+ if (!virt && !(s->ctlr & group_mask)) {
+ return false;
+ }
+
+ if (virt && !(s->h_hcr[cpu] & R_GICH_HCR_EN_MASK)) {
+ return false;
+ }
+
+ if (!(s->cpu_ctlr[cpu_iface] & group_mask)) {
+ return false;
+ }
+
+ return true;
+}
+
+/* TODO: Many places that call this routine could be optimized. */
+/* Update interrupt status after enabled or pending bits have been changed. */
+static inline void gic_update_internal(GICState *s, bool virt)
+{
+ int best_irq;
+ int best_prio;
+ int irq_level, fiq_level;
+ int cpu, cpu_iface;
+ int group = 0;
+ qemu_irq *irq_lines = virt ? s->parent_virq : s->parent_irq;
+ qemu_irq *fiq_lines = virt ? s->parent_vfiq : s->parent_fiq;
+
+ for (cpu = 0; cpu < s->num_cpu; cpu++) {
+ cpu_iface = virt ? (cpu + GIC_NCPU) : cpu;
+
+ s->current_pending[cpu_iface] = 1023;
+ if (!gic_irq_signaling_enabled(s, cpu, virt,
+ GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) {
+ qemu_irq_lower(irq_lines[cpu]);
+ qemu_irq_lower(fiq_lines[cpu]);
+ continue;
+ }
+
+ if (virt) {
+ gic_get_best_virq(s, cpu, &best_irq, &best_prio, &group);
+ } else {
+ gic_get_best_irq(s, cpu, &best_irq, &best_prio, &group);
+ }
+
+ if (best_irq != 1023) {
+ trace_gic_update_bestirq(virt ? "vcpu" : "cpu", cpu,
+ best_irq, best_prio,
+ s->priority_mask[cpu_iface],
+ s->running_priority[cpu_iface]);
+ }
+
+ irq_level = fiq_level = 0;
+
+ if (best_prio < s->priority_mask[cpu_iface]) {
+ s->current_pending[cpu_iface] = best_irq;
+ if (best_prio < s->running_priority[cpu_iface]) {
+ if (gic_irq_signaling_enabled(s, cpu, virt, 1 << group)) {
+ if (group == 0 &&
+ s->cpu_ctlr[cpu_iface] & GICC_CTLR_FIQ_EN) {
+ DPRINTF("Raised pending FIQ %d (cpu %d)\n",
+ best_irq, cpu_iface);
+ fiq_level = 1;
+ trace_gic_update_set_irq(cpu, virt ? "vfiq" : "fiq",
+ fiq_level);
+ } else {
+ DPRINTF("Raised pending IRQ %d (cpu %d)\n",
+ best_irq, cpu_iface);
+ irq_level = 1;
+ trace_gic_update_set_irq(cpu, virt ? "virq" : "irq",
+ irq_level);
+ }
+ }
+ }
+ }
+
+ qemu_set_irq(irq_lines[cpu], irq_level);
+ qemu_set_irq(fiq_lines[cpu], fiq_level);
+ }
+}
+
+static void gic_update(GICState *s)
+{
+ gic_update_internal(s, false);
+}
+
+/* Return true if this LR is empty, i.e. the corresponding bit
+ * in ELRSR is set.
+ */
+static inline bool gic_lr_entry_is_free(uint32_t entry)
+{
+ return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID)
+ && (GICH_LR_HW(entry) || !GICH_LR_EOI(entry));
+}
+
+/* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the
+ * corresponding bit in EISR is set.
+ */
+static inline bool gic_lr_entry_is_eoi(uint32_t entry)
+{
+ return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID)
+ && !GICH_LR_HW(entry) && GICH_LR_EOI(entry);
+}
+
+static inline void gic_extract_lr_info(GICState *s, int cpu,
+ int *num_eoi, int *num_valid, int *num_pending)
+{
+ int lr_idx;
+
+ *num_eoi = 0;
+ *num_valid = 0;
+ *num_pending = 0;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if (gic_lr_entry_is_eoi(*entry)) {
+ (*num_eoi)++;
+ }
+
+ if (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID) {
+ (*num_valid)++;
+ }
+
+ if (GICH_LR_STATE(*entry) == GICH_LR_STATE_PENDING) {
+ (*num_pending)++;
+ }
+ }
+}
+
+static void gic_compute_misr(GICState *s, int cpu)
+{
+ uint32_t value = 0;
+ int vcpu = cpu + GIC_NCPU;
+
+ int num_eoi, num_valid, num_pending;
+
+ gic_extract_lr_info(s, cpu, &num_eoi, &num_valid, &num_pending);
+
+ /* EOI */
+ if (num_eoi) {
+ value |= R_GICH_MISR_EOI_MASK;
+ }
+
+ /* U: true if only 0 or 1 LR entry is valid */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_UIE_MASK) && (num_valid < 2)) {
+ value |= R_GICH_MISR_U_MASK;
+ }
+
+ /* LRENP: EOICount is not 0 */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_LRENPIE_MASK) &&
+ ((s->h_hcr[cpu] & R_GICH_HCR_EOICount_MASK) != 0)) {
+ value |= R_GICH_MISR_LRENP_MASK;
+ }
+
+ /* NP: no pending interrupts */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_NPIE_MASK) && (num_pending == 0)) {
+ value |= R_GICH_MISR_NP_MASK;
+ }
+
+ /* VGrp0E: group0 virq signaling enabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0EIE_MASK) &&
+ (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) {
+ value |= R_GICH_MISR_VGrp0E_MASK;
+ }
+
+ /* VGrp0D: group0 virq signaling disabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0DIE_MASK) &&
+ !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) {
+ value |= R_GICH_MISR_VGrp0D_MASK;
+ }
+
+ /* VGrp1E: group1 virq signaling enabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1EIE_MASK) &&
+ (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) {
+ value |= R_GICH_MISR_VGrp1E_MASK;
+ }
+
+ /* VGrp1D: group1 virq signaling disabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1DIE_MASK) &&
+ !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) {
+ value |= R_GICH_MISR_VGrp1D_MASK;
+ }
+
+ s->h_misr[cpu] = value;
+}
+
+static void gic_update_maintenance(GICState *s)
+{
+ int cpu = 0;
+ int maint_level;
+
+ for (cpu = 0; cpu < s->num_cpu; cpu++) {
+ gic_compute_misr(s, cpu);
+ maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu];
+
+ trace_gic_update_maintenance_irq(cpu, maint_level);
+ qemu_set_irq(s->maintenance_irq[cpu], maint_level);
+ }
+}
+
+static void gic_update_virt(GICState *s)
+{
+ gic_update_internal(s, true);
+ gic_update_maintenance(s);
+}
+
+static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
+ int cm, int target)
+{
+ if (level) {
+ GIC_DIST_SET_LEVEL(irq, cm);
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) {
+ DPRINTF("Set %d pending mask %x\n", irq, target);
+ GIC_DIST_SET_PENDING(irq, target);
+ }
+ } else {
+ GIC_DIST_CLEAR_LEVEL(irq, cm);
+ }
+}
+
+static void gic_set_irq_generic(GICState *s, int irq, int level,
+ int cm, int target)
+{
+ if (level) {
+ GIC_DIST_SET_LEVEL(irq, cm);
+ DPRINTF("Set %d pending mask %x\n", irq, target);
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) {
+ GIC_DIST_SET_PENDING(irq, target);
+ }
+ } else {
+ GIC_DIST_CLEAR_LEVEL(irq, cm);
+ }
+}
+
+/* Process a change in an external IRQ input. */
+static void gic_set_irq(void *opaque, int irq, int level)
+{
+ /* Meaning of the 'irq' parameter:
+ * [0..N-1] : external interrupts
+ * [N..N+31] : PPI (internal) interrupts for CPU 0
+ * [N+32..N+63] : PPI (internal) interrupts for CPU 1
+ * ...
+ */
+ GICState *s = (GICState *)opaque;
+ int cm, target;
+ if (irq < (s->num_irq - GIC_INTERNAL)) {
+ /* The first external input line is internal interrupt 32. */
+ cm = ALL_CPU_MASK;
+ irq += GIC_INTERNAL;
+ target = GIC_DIST_TARGET(irq);
+ } else {
+ int cpu;
+ irq -= (s->num_irq - GIC_INTERNAL);
+ cpu = irq / GIC_INTERNAL;
+ irq %= GIC_INTERNAL;
+ cm = 1 << cpu;
+ target = cm;
+ }
+
+ assert(irq >= GIC_NR_SGIS);
+
+ if (level == GIC_DIST_TEST_LEVEL(irq, cm)) {
+ return;
+ }
+
+ if (s->revision == REV_11MPCORE) {
+ gic_set_irq_11mpcore(s, irq, level, cm, target);
+ } else {
+ gic_set_irq_generic(s, irq, level, cm, target);
+ }
+ trace_gic_set_irq(irq, level, cm, target);
+
+ gic_update(s);
+}
+
+static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
+ MemTxAttrs attrs)
+{
+ uint16_t pending_irq = s->current_pending[cpu];
+
+ if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
+ int group = gic_test_group(s, pending_irq, cpu);
+
+ /* On a GIC without the security extensions, reading this register
+ * behaves in the same way as a secure access to a GIC with them.
+ */
+ bool secure = !gic_cpu_ns_access(s, cpu, attrs);
+
+ if (group == 0 && !secure) {
+ /* Group0 interrupts hidden from Non-secure access */
+ return 1023;
+ }
+ if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) {
+ /* Group1 interrupts only seen by Secure access if
+ * AckCtl bit set.
+ */
+ return 1022;
+ }
+ }
+ return pending_irq;
+}
+
+static int gic_get_group_priority(GICState *s, int cpu, int irq)
+{
+ /* Return the group priority of the specified interrupt
+ * (which is the top bits of its priority, with the number
+ * of bits masked determined by the applicable binary point register).
+ */
+ int bpr;
+ uint32_t mask;
+
+ if (gic_has_groups(s) &&
+ !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) &&
+ gic_test_group(s, irq, cpu)) {
+ bpr = s->abpr[cpu] - 1;
+ assert(bpr >= 0);
+ } else {
+ bpr = s->bpr[cpu];
+ }
+
+ /* a BPR of 0 means the group priority bits are [7:1];
+ * a BPR of 1 means they are [7:2], and so on down to
+ * a BPR of 7 meaning no group priority bits at all.
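+ * For example, a BPR of 2 masks bits [2:0], so priorities 0x90-0x97 all share group priority 0x90.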
+ */
+ mask = ~0U << ((bpr & 7) + 1);
+
+ return gic_get_priority(s, irq, cpu) & mask;
+}
+
+static void gic_activate_irq(GICState *s, int cpu, int irq)
+{
+ /* Set the appropriate Active Priority Register bit for this IRQ,
+ * and update the running priority.
+ */
+ int prio = gic_get_group_priority(s, cpu, irq);
+ int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
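+ /* The lowest (min_bpr + 1) bits of any group priority are zero, so shifting them out yields a dense preemption level; each 32-bit APR word then tracks 32 levels. */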
+ int preemption_level = prio >> (min_bpr + 1);
+ int regno = preemption_level / 32;
+ int bitno = preemption_level % 32;
+ uint32_t *papr = NULL;
+
+ if (gic_is_vcpu(cpu)) {
+ assert(regno == 0);
+ papr = &s->h_apr[gic_get_vcpu_real_id(cpu)];
+ } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) {
+ papr = &s->nsapr[regno][cpu];
+ } else {
+ papr = &s->apr[regno][cpu];
+ }
+
+ *papr |= (1 << bitno);
+
+ s->running_priority[cpu] = prio;
+ gic_set_active(s, irq, cpu);
+}
+
+static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
+{
+ /* Recalculate the current running priority for this CPU based
+ * on the set bits in the Active Priority Registers.
+ */
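+ /* The lowest set bit across the APRs is the preemption level of the
+ * highest-priority active interrupt; shifting it back by (min BPR + 1)
+ * converts that level into a group priority value.
+ */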
+ int i;
+
+ if (gic_is_vcpu(cpu)) {
+ uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)];
+ if (apr) {
+ return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1);
+ } else {
+ return 0x100;
+ }
+ }
+
+ for (i = 0; i < GIC_NR_APRS; i++) {
+ uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu];
+ if (!apr) {
+ continue;
+ }
+ return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
+ }
+ return 0x100;
+}
+
+static void gic_drop_prio(GICState *s, int cpu, int group)
+{
+ /* Drop the priority of the currently active interrupt in the
+ * specified group.
+ *
+ * Note that we can guarantee (because of the requirement to nest
+ * GICC_IAR reads [which activate an interrupt and raise priority]
+ * with GICC_EOIR writes [which drop the priority for the interrupt])
+ * that the interrupt we're being called for is the highest priority
+ * active interrupt, meaning that it has the lowest set bit in the
+ * APR registers.
+ *
+ * If the guest does not honour the ordering constraints then the
+ * behaviour of the GIC is UNPREDICTABLE, which for us means that
+ * the values of the APR registers might become incorrect and the
+ * running priority will be wrong, so interrupts that should preempt
+ * might not do so, and interrupts that should not preempt might do so.
+ */
+ if (gic_is_vcpu(cpu)) {
+ int rcpu = gic_get_vcpu_real_id(cpu);
+
+ if (s->h_apr[rcpu]) {
+ /* Clear lowest set bit */
+ s->h_apr[rcpu] &= s->h_apr[rcpu] - 1;
+ }
+ } else {
+ int i;
+
+ for (i = 0; i < GIC_NR_APRS; i++) {
+ uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu];
+ if (!*papr) {
+ continue;
+ }
+ /* Clear lowest set bit */
+ *papr &= *papr - 1;
+ break;
+ }
+ }
+
+ s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
+}
+
+static inline uint32_t gic_clear_pending_sgi(GICState *s, int irq, int cpu)
+{
+ int src;
+ uint32_t ret;
+
+ if (!gic_is_vcpu(cpu)) {
+ /* Lookup the source CPU for the SGI and clear this in the
+ * sgi_pending map. Return the src and clear the overall pending
+ * state on this CPU if the SGI is not pending from any CPUs.
+ */
+ assert(s->sgi_pending[irq][cpu] != 0);
+ src = ctz32(s->sgi_pending[irq][cpu]);
+ s->sgi_pending[irq][cpu] &= ~(1 << src);
+ if (s->sgi_pending[irq][cpu] == 0) {
+ gic_clear_pending(s, irq, cpu);
+ }
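+ /* GICC_IAR layout: source CPU ID in bits [12:10], interrupt ID in [9:0] */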
+ ret = irq | ((src & 0x7) << 10);
+ } else {
+ uint32_t *lr_entry = gic_get_lr_entry(s, irq, cpu);
+ src = GICH_LR_CPUID(*lr_entry);
+
+ gic_clear_pending(s, irq, cpu);
+ ret = irq | (src << 10);
+ }
+
+ return ret;
+}
+
+uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
+{
+ int ret, irq;
+
+ /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately
+ * for the case where this GIC supports grouping and the pending interrupt
+ * is in the wrong group.
+ */
+ irq = gic_get_current_pending_irq(s, cpu, attrs);
+ trace_gic_acknowledge_irq(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), irq);
+
+ if (irq >= GIC_MAXIRQ) {
+ DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq);
+ return irq;
+ }
+
+ if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) {
+ DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq);
+ return 1023;
+ }
+
+ gic_activate_irq(s, cpu, irq);
+
+ if (s->revision == REV_11MPCORE) {
+ /* Clear pending flags for both level and edge triggered interrupts.
+ * Level triggered IRQs will be reasserted once they become inactive.
+ */
+ gic_clear_pending(s, irq, cpu);
+ ret = irq;
+ } else {
+ if (irq < GIC_NR_SGIS) {
+ ret = gic_clear_pending_sgi(s, irq, cpu);
+ } else {
+ gic_clear_pending(s, irq, cpu);
+ ret = irq;
+ }
+ }
+
+ if (gic_is_vcpu(cpu)) {
+ gic_update_virt(s);
+ } else {
+ gic_update(s);
+ }
+ DPRINTF("ACK %d\n", irq);
+ return ret;
+}
+
+static uint32_t gic_fullprio_mask(GICState *s, int cpu)
+{
+ /*
+ * Return a mask word which clears the unimplemented priority
+ * bits from a priority value for an interrupt. (Not to be
+ * confused with the group priority, whose mask depends on BPR.)
+ */
+ int priBits;
+
+ if (gic_is_vcpu(cpu)) {
+ priBits = GIC_VIRT_MAX_GROUP_PRIO_BITS;
+ } else {
+ priBits = s->n_prio_bits;
+ }
+ return ~0U << (8 - priBits);
+}
+
+void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
+ MemTxAttrs attrs)
+{
+ if (s->security_extn && !attrs.secure) {
+ if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
+ return; /* Ignore Non-secure access of Group0 IRQ */
+ }
+ val = 0x80 | (val >> 1); /* Non-secure view */
+ }
+
+ val &= gic_fullprio_mask(s, cpu);
+
+ if (irq < GIC_INTERNAL) {
+ s->priority1[irq][cpu] = val;
+ } else {
+ s->priority2[(irq) - GIC_INTERNAL] = val;
+ }
+}
+
+static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq,
+ MemTxAttrs attrs)
+{
+ uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu);
+
+ if (s->security_extn && !attrs.secure) {
+ if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
+ return 0; /* Non-secure access cannot read priority of Group0 IRQ */
+ }
+ prio = (prio << 1) & 0xff; /* Non-secure view */
+ }
+ return prio & gic_fullprio_mask(s, cpu);
+}
+
+static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask,
+ MemTxAttrs attrs)
+{
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
+ if (s->priority_mask[cpu] & 0x80) {
+ /* Priority Mask in upper half */
+ pmask = 0x80 | (pmask >> 1);
+ } else {
+ /* Non-secure write ignored if priority mask is in lower half */
+ return;
+ }
+ }
+ s->priority_mask[cpu] = pmask & gic_fullprio_mask(s, cpu);
+}
+
+static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs)
+{
+ uint32_t pmask = s->priority_mask[cpu];
+
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
+ if (pmask & 0x80) {
+ /* Priority Mask in upper half, return Non-secure view */
+ pmask = (pmask << 1) & 0xff;
+ } else {
+ /* Priority Mask in lower half, RAZ */
+ pmask = 0;
+ }
+ }
+ return pmask;
+}
+
+static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs)
+{
+ uint32_t ret = s->cpu_ctlr[cpu];
+
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
+ /* Construct the NS banked view of GICC_CTLR from the correct
+ * bits of the S banked view. We don't need to move the bypass
+ * control bits because we don't implement that (IMPDEF) part
+ * of the GIC architecture.
+ */
+ ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1;
+ }
+ return ret;
+}
+
+static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value,
+ MemTxAttrs attrs)
+{
+ uint32_t mask;
+
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
+ /* The NS view can only write certain bits in the register;
+ * the rest are unchanged
+ */
+ mask = GICC_CTLR_EN_GRP1;
+ if (s->revision == 2) {
+ mask |= GICC_CTLR_EOIMODE_NS;
+ }
+ s->cpu_ctlr[cpu] &= ~mask;
+ s->cpu_ctlr[cpu] |= (value << 1) & mask;
+ } else {
+ if (s->revision == 2) {
+ mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK;
+ } else {
+ mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK;
+ }
+ s->cpu_ctlr[cpu] = value & mask;
+ }
+ DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, "
+ "Group1 Interrupts %sabled\n", cpu,
+ (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis",
+ (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis");
+}
+
+static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs)
+{
+ if ((s->revision != REV_11MPCORE) && (s->running_priority[cpu] > 0xff)) {
+ /* Idle priority */
+ return 0xff;
+ }
+
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
+ if (s->running_priority[cpu] & 0x80) {
+ /* Running priority in upper half of range: return the Non-secure
+ * view of the priority.
+ */
+ return s->running_priority[cpu] << 1;
+ } else {
+ /* Running priority in lower half of range: RAZ */
+ return 0;
+ }
+ } else {
+ return s->running_priority[cpu];
+ }
+}
+
+/* Return true if we should split priority drop and interrupt deactivation,
+ * ie whether the relevant EOIMode bit is set.
+ */
+static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs)
+{
+ if (s->revision != 2) {
+ /* Before GICv2 prio-drop and deactivate are not separable */
+ return false;
+ }
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
+ return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS;
+ }
+ return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE;
+}
+
+static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
+{
+ int group;
+
+ if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) {
+ /*
+ * This handles two cases:
+ * 1. If software writes the ID of a spurious interrupt [ie 1023]
+ * to the GICC_DIR, the GIC ignores that write.
+ * 2. If software writes the number of a non-existent interrupt
+ * this must be a subcase of "value written is not an active interrupt"
+ * and so this is UNPREDICTABLE. We choose to ignore it. For vCPUs,
+ * all IRQs potentially exist, so this limit does not apply.
+ */
+ return;
+ }
+
+ if (!gic_eoi_split(s, cpu, attrs)) {
+ /* This is UNPREDICTABLE; we choose to ignore it */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_deactivate_irq: GICC_DIR write when EOIMode clear");
+ return;
+ }
+
+ if (gic_is_vcpu(cpu) && !gic_virq_is_valid(s, irq, cpu)) {
+ /* This vIRQ does not have an LR entry which is either active or
+ * pending and active. Increment EOICount and ignore the write.
+ */
+ int rcpu = gic_get_vcpu_real_id(cpu);
+ s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT;
+
+ /* Update the virtual interface in case a maintenance interrupt should
+ * be raised.
+ */
+ gic_update_virt(s);
+ return;
+ }
+
+ group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
+
+ if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
+ DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq);
+ return;
+ }
+
+ gic_clear_active(s, irq, cpu);
+}
+
+static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
+{
+ int cm = 1 << cpu;
+ int group;
+
+ DPRINTF("EOI %d\n", irq);
+ if (gic_is_vcpu(cpu)) {
+ /* The call to gic_drop_prio() will clear a bit in GICH_APR iff the
+ * running prio is < 0x100.
+ */
+ bool prio_drop = s->running_priority[cpu] < 0x100;
+
+ if (irq >= GIC_MAXIRQ) {
+ /* Ignore spurious interrupt */
+ return;
+ }
+
+ gic_drop_prio(s, cpu, 0);
+
+ if (!gic_eoi_split(s, cpu, attrs)) {
+ bool valid = gic_virq_is_valid(s, irq, cpu);
+ if (prio_drop && !valid) {
+ /* We are in a situation where:
+ * - V_CTRL.EOIMode is false (no EOI split),
+ * - The call to gic_drop_prio() cleared a bit in GICH_APR,
+ * - This vIRQ does not have an LR entry which is either
+ * active or pending and active.
+ * In that case, we must increment EOICount.
+ */
+ int rcpu = gic_get_vcpu_real_id(cpu);
+ s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT;
+ } else if (valid) {
+ gic_clear_active(s, irq, cpu);
+ }
+ }
+
+ gic_update_virt(s);
+ return;
+ }
+
+ if (irq >= s->num_irq) {
+ /* This handles two cases:
+ * 1. If software writes the ID of a spurious interrupt [ie 1023]
+ * to the GICC_EOIR, the GIC ignores that write.
+ * 2. If software writes the number of a non-existent interrupt
+ * this must be a subcase of "value written does not match the last
+ * valid interrupt value read from the Interrupt Acknowledge
+ * register" and so this is UNPREDICTABLE. We choose to ignore it.
+ */
+ return;
+ }
+ if (s->running_priority[cpu] == 0x100) {
+ return; /* No active IRQ. */
+ }
+
+ if (s->revision == REV_11MPCORE) {
+ /* Mark level triggered interrupts as pending if they are still
+ raised. */
+ if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm)
+ && GIC_DIST_TEST_LEVEL(irq, cm)
+ && (GIC_DIST_TARGET(irq) & cm) != 0) {
+ DPRINTF("Set %d pending mask %x\n", irq, cm);
+ GIC_DIST_SET_PENDING(irq, cm);
+ }
+ }
+
+ group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
+
+ if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
+ DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
+ return;
+ }
+
+ /* A Secure EOI with GICC_CTLR.AckCtl == 0 for a Group 1 interrupt is
+ * UNPREDICTABLE. We choose to handle it as if AckCtl == 1, i.e. go
+ * ahead and complete the irq anyway.
+ */
+
+ gic_drop_prio(s, cpu, group);
+
+ /* In GICv2 the guest can choose to split priority-drop and deactivate */
+ if (!gic_eoi_split(s, cpu, attrs)) {
+ gic_clear_active(s, irq, cpu);
+ }
+ gic_update(s);
+}
+
+static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+ uint32_t res;
+ int irq;
+ int i;
+ int cpu;
+ int cm;
+ int mask;
+
+ cpu = gic_get_current_cpu(s);
+ cm = 1 << cpu;
+ if (offset < 0x100) {
+ if (offset == 0) { /* GICD_CTLR */
+ if (s->security_extn && !attrs.secure) {
+ /* The NS bank of this register is just an alias of the
+ * EnableGrp1 bit in the S bank version.
+ */
+ return extract32(s->ctlr, 1, 1);
+ } else {
+ return s->ctlr;
+ }
+ }
+ if (offset == 4)
+ /* Interrupt Controller Type Register */
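+ /* ITLinesNumber in bits [4:0], CPUNumber in [7:5], SecurityExtn in bit 10 */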
+ return ((s->num_irq / 32) - 1)
+ | ((s->num_cpu - 1) << 5)
+ | (s->security_extn << 10);
+ if (offset < 0x08)
+ return 0;
+ if (offset >= 0x80) {
+ /* Interrupt Group Registers: these RAZ/WI if this is an NS
+ * access to a GIC with the security extensions, or if the GIC
+ * doesn't have groups at all.
+ */
+ res = 0;
+ if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
+ /* Every byte offset holds 8 group status bits */
+ irq = (offset - 0x080) * 8;
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+ for (i = 0; i < 8; i++) {
+ if (GIC_DIST_TEST_GROUP(irq + i, cm)) {
+ res |= (1 << i);
+ }
+ }
+ }
+ return res;
+ }
+ goto bad_reg;
+ } else if (offset < 0x200) {
+ /* Interrupt Set/Clear Enable. */
+ if (offset < 0x180)
+ irq = (offset - 0x100) * 8;
+ else
+ irq = (offset - 0x180) * 8;
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ res = 0;
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
+ res |= (1 << i);
+ }
+ }
+ } else if (offset < 0x300) {
+ /* Interrupt Set/Clear Pending. */
+ if (offset < 0x280)
+ irq = (offset - 0x200) * 8;
+ else
+ irq = (offset - 0x280) * 8;
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ res = 0;
+ mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (gic_test_pending(s, irq + i, mask)) {
+ res |= (1 << i);
+ }
+ }
+ } else if (offset < 0x400) {
+ /* Interrupt Set/Clear Active. */
+ if (offset < 0x380) {
+ irq = (offset - 0x300) * 8;
+ } else if (s->revision == 2) {
+ irq = (offset - 0x380) * 8;
+ } else {
+ goto bad_reg;
+ }
+
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ res = 0;
+ mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) {
+ res |= (1 << i);
+ }
+ }
+ } else if (offset < 0x800) {
+ /* Interrupt Priority. */
+ irq = (offset - 0x400);
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ res = gic_dist_get_priority(s, cpu, irq, attrs);
+ } else if (offset < 0xc00) {
+ /* Interrupt CPU Target. */
+ if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
+ /* For uniprocessor GICs these RAZ/WI */
+ res = 0;
+ } else {
+ irq = (offset - 0x800);
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+ if (irq < 29 && s->revision == REV_11MPCORE) {
+ res = 0;
+ } else if (irq < GIC_INTERNAL) {
+ res = cm;
+ } else {
+ res = GIC_DIST_TARGET(irq);
+ }
+ }
+ } else if (offset < 0xf00) {
+ /* Interrupt Configuration. */
+ irq = (offset - 0xc00) * 4;
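+ /* Each interrupt has a 2-bit field: bit 0 reflects the handling model
+ * (writable only on 11MPCore), bit 1 is set for edge-triggered interrupts.
+ */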
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ res = 0;
+ for (i = 0; i < 4; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (GIC_DIST_TEST_MODEL(irq + i)) {
+ res |= (1 << (i * 2));
+ }
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
+ res |= (2 << (i * 2));
+ }
+ }
+ } else if (offset < 0xf10) {
+ goto bad_reg;
+ } else if (offset < 0xf30) {
+ if (s->revision == REV_11MPCORE) {
+ goto bad_reg;
+ }
+
+ if (offset < 0xf20) {
+ /* GICD_CPENDSGIRn */
+ irq = (offset - 0xf10);
+ } else {
+ irq = (offset - 0xf20);
+ /* GICD_SPENDSGIRn */
+ }
+
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
+ res = 0; /* Ignore Non-secure access of Group0 IRQ */
+ } else {
+ res = s->sgi_pending[irq][cpu];
+ }
+ } else if (offset < 0xfd0) {
+ goto bad_reg;
+ } else if (offset < 0x1000) {
+ if (offset & 3) {
+ res = 0;
+ } else {
+ switch (s->revision) {
+ case REV_11MPCORE:
+ res = gic_id_11mpcore[(offset - 0xfd0) >> 2];
+ break;
+ case 1:
+ res = gic_id_gicv1[(offset - 0xfd0) >> 2];
+ break;
+ case 2:
+ res = gic_id_gicv2[(offset - 0xfd0) >> 2];
+ break;
+ default:
+ res = 0;
+ }
+ }
+ } else {
+ g_assert_not_reached();
+ }
+ return res;
+bad_reg:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_dist_readb: Bad offset %x\n", (int)offset);
+ return 0;
+}
+
+static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ switch (size) {
+ case 1:
+ *data = gic_dist_readb(opaque, offset, attrs);
+ break;
+ case 2:
+ *data = gic_dist_readb(opaque, offset, attrs);
+ *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
+ break;
+ case 4:
+ *data = gic_dist_readb(opaque, offset, attrs);
+ *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
+ *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
+ *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
+ break;
+ default:
+ return MEMTX_ERROR;
+ }
+
+ trace_gic_dist_read(offset, size, *data);
+ return MEMTX_OK;
+}
+
+static void gic_dist_writeb(void *opaque, hwaddr offset,
+ uint32_t value, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+ int irq;
+ int i;
+ int cpu;
+
+ cpu = gic_get_current_cpu(s);
+ if (offset < 0x100) {
+ if (offset == 0) {
+ if (s->security_extn && !attrs.secure) {
+ /* NS version is just an alias of the S version's bit 1 */
+ s->ctlr = deposit32(s->ctlr, 1, 1, value);
+ } else if (gic_has_groups(s)) {
+ s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1);
+ } else {
+ s->ctlr = value & GICD_CTLR_EN_GRP0;
+ }
+ DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n",
+ s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis",
+ s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis");
+ } else if (offset < 4) {
+ /* ignored. */
+ } else if (offset >= 0x80) {
+ /* Interrupt Group Registers: RAZ/WI for NS access to secure
+ * GIC, or for GICs without groups.
+ */
+ if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
+ /* Every byte offset holds 8 group status bits */
+ irq = (offset - 0x80) * 8;
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+ for (i = 0; i < 8; i++) {
+ /* Group bits are banked for private interrupts */
+ int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+ if (value & (1 << i)) {
+ /* Group1 (Non-secure) */
+ GIC_DIST_SET_GROUP(irq + i, cm);
+ } else {
+ /* Group0 (Secure) */
+ GIC_DIST_CLEAR_GROUP(irq + i, cm);
+ }
+ }
+ }
+ } else {
+ goto bad_reg;
+ }
+ } else if (offset < 0x180) {
+ /* Interrupt Set Enable. */
+ irq = (offset - 0x100) * 8;
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ if (irq < GIC_NR_SGIS) {
+ value = 0xff;
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (value & (1 << i)) {
+ int mask =
+ (irq < GIC_INTERNAL) ? (1 << cpu)
+ : GIC_DIST_TARGET(irq + i);
+ int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) {
+ DPRINTF("Enabled IRQ %d\n", irq + i);
+ trace_gic_enable_irq(irq + i);
+ }
+ GIC_DIST_SET_ENABLED(irq + i, cm);
+ /* If a raised level triggered IRQ is enabled then mark
+ it as pending. */
+ if (GIC_DIST_TEST_LEVEL(irq + i, mask)
+ && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
+ DPRINTF("Set %d pending mask %x\n", irq + i, mask);
+ GIC_DIST_SET_PENDING(irq + i, mask);
+ }
+ }
+ }
+ } else if (offset < 0x200) {
+ /* Interrupt Clear Enable. */
+ irq = (offset - 0x180) * 8;
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ if (irq < GIC_NR_SGIS) {
+ value = 0;
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (value & (1 << i)) {
+ int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
+ DPRINTF("Disabled IRQ %d\n", irq + i);
+ trace_gic_disable_irq(irq + i);
+ }
+ GIC_DIST_CLEAR_ENABLED(irq + i, cm);
+ }
+ }
+ } else if (offset < 0x280) {
+ /* Interrupt Set Pending. */
+ irq = (offset - 0x200) * 8;
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ if (irq < GIC_NR_SGIS) {
+ value = 0;
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (value & (1 << i)) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i));
+ }
+ }
+ } else if (offset < 0x300) {
+ /* Interrupt Clear Pending. */
+ irq = (offset - 0x280) * 8;
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ if (irq < GIC_NR_SGIS) {
+ value = 0;
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ /* ??? This currently clears the pending bit for all CPUs, even
+ for per-CPU interrupts. It's unclear whether this is the
+ correct behavior. */
+ if (value & (1 << i)) {
+ GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
+ }
+ }
+ } else if (offset < 0x380) {
+ /* Interrupt Set Active. */
+ if (s->revision != 2) {
+ goto bad_reg;
+ }
+
+ irq = (offset - 0x300) * 8;
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+
+ /* This register is banked per-cpu for PPIs */
+ int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
+
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (value & (1 << i)) {
+ GIC_DIST_SET_ACTIVE(irq + i, cm);
+ }
+ }
+ } else if (offset < 0x400) {
+ /* Interrupt Clear Active. */
+ if (s->revision != 2) {
+ goto bad_reg;
+ }
+
+ irq = (offset - 0x380) * 8;
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+
+ /* This register is banked per-cpu for PPIs */
+ int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
+
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (value & (1 << i)) {
+ GIC_DIST_CLEAR_ACTIVE(irq + i, cm);
+ }
+ }
+ } else if (offset < 0x800) {
+ /* Interrupt Priority. */
+ irq = (offset - 0x400);
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ gic_dist_set_priority(s, cpu, irq, value, attrs);
+ } else if (offset < 0xc00) {
+ /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
+ * annoying exception of the 11MPCore's GIC.
+ */
+ if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
+ irq = (offset - 0x800);
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+ if (irq < 29 && s->revision == REV_11MPCORE) {
+ value = 0;
+ } else if (irq < GIC_INTERNAL) {
+ value = ALL_CPU_MASK;
+ }
+ s->irq_target[irq] = value & ALL_CPU_MASK;
+ }
+ } else if (offset < 0xf00) {
+ /* Interrupt Configuration. */
+ irq = (offset - 0xc00) * 4;
+ if (irq >= s->num_irq)
+ goto bad_reg;
+ if (irq < GIC_NR_SGIS)
+ value |= 0xaa;
+ for (i = 0; i < 4; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (s->revision == REV_11MPCORE) {
+ if (value & (1 << (i * 2))) {
+ GIC_DIST_SET_MODEL(irq + i);
+ } else {
+ GIC_DIST_CLEAR_MODEL(irq + i);
+ }
+ }
+ if (value & (2 << (i * 2))) {
+ GIC_DIST_SET_EDGE_TRIGGER(irq + i);
+ } else {
+ GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i);
+ }
+ }
+ } else if (offset < 0xf10) {
+ /* 0xf00 is only handled for 32-bit writes. */
+ goto bad_reg;
+ } else if (offset < 0xf20) {
+ /* GICD_CPENDSGIRn */
+ if (s->revision == REV_11MPCORE) {
+ goto bad_reg;
+ }
+ irq = (offset - 0xf10);
+
+ if (!s->security_extn || attrs.secure ||
+ GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
+ s->sgi_pending[irq][cpu] &= ~value;
+ if (s->sgi_pending[irq][cpu] == 0) {
+ GIC_DIST_CLEAR_PENDING(irq, 1 << cpu);
+ }
+ }
+ } else if (offset < 0xf30) {
+ /* GICD_SPENDSGIRn */
+ if (s->revision == REV_11MPCORE) {
+ goto bad_reg;
+ }
+ irq = (offset - 0xf20);
+
+ if (!s->security_extn || attrs.secure ||
+ GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
+ GIC_DIST_SET_PENDING(irq, 1 << cpu);
+ s->sgi_pending[irq][cpu] |= value;
+ }
+ } else {
+ goto bad_reg;
+ }
+ gic_update(s);
+ return;
+bad_reg:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_dist_writeb: Bad offset %x\n", (int)offset);
+}
+
+static void gic_dist_writew(void *opaque, hwaddr offset,
+ uint32_t value, MemTxAttrs attrs)
+{
+ gic_dist_writeb(opaque, offset, value & 0xff, attrs);
+ gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
+}
+
+static void gic_dist_writel(void *opaque, hwaddr offset,
+ uint32_t value, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+ if (offset == 0xf00) {
+ int cpu;
+ int irq;
+ int mask;
+ int target_cpu;
+
+ cpu = gic_get_current_cpu(s);
+ irq = value & 0xf;
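+ /* GICD_SGIR: TargetListFilter in bits [25:24], CPUTargetList in
+ * [23:16], SGIINTID in [3:0].
+ */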
+ switch ((value >> 24) & 3) {
+ case 0:
+ mask = (value >> 16) & ALL_CPU_MASK;
+ break;
+ case 1:
+ mask = ALL_CPU_MASK ^ (1 << cpu);
+ break;
+ case 2:
+ mask = 1 << cpu;
+ break;
+ default:
+ DPRINTF("Bad Soft Int target filter\n");
+ mask = ALL_CPU_MASK;
+ break;
+ }
+ GIC_DIST_SET_PENDING(irq, mask);
+ target_cpu = ctz32(mask);
+ while (target_cpu < GIC_NCPU) {
+ s->sgi_pending[irq][target_cpu] |= (1 << cpu);
+ mask &= ~(1 << target_cpu);
+ target_cpu = ctz32(mask);
+ }
+ gic_update(s);
+ return;
+ }
+ gic_dist_writew(opaque, offset, value & 0xffff, attrs);
+ gic_dist_writew(opaque, offset + 2, value >> 16, attrs);
+}
+
+static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size, MemTxAttrs attrs)
+{
+ trace_gic_dist_write(offset, size, data);
+
+ switch (size) {
+ case 1:
+ gic_dist_writeb(opaque, offset, data, attrs);
+ return MEMTX_OK;
+ case 2:
+ gic_dist_writew(opaque, offset, data, attrs);
+ return MEMTX_OK;
+ case 4:
+ gic_dist_writel(opaque, offset, data, attrs);
+ return MEMTX_OK;
+ default:
+ return MEMTX_ERROR;
+ }
+}
+
+static inline uint32_t gic_apr_ns_view(GICState *s, int cpu, int regno)
+{
+ /* Return the Nonsecure view of GICC_APR<regno>. This is the
+ * second half of GICC_NSAPR.
+ */
+ switch (GIC_MIN_BPR) {
+ case 0:
+ if (regno < 2) {
+ return s->nsapr[regno + 2][cpu];
+ }
+ break;
+ case 1:
+ if (regno == 0) {
+ return s->nsapr[regno + 1][cpu];
+ }
+ break;
+ case 2:
+ if (regno == 0) {
+ return extract32(s->nsapr[0][cpu], 16, 16);
+ }
+ break;
+ case 3:
+ if (regno == 0) {
+ return extract32(s->nsapr[0][cpu], 8, 8);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return 0;
+}
+
+static inline void gic_apr_write_ns_view(GICState *s, int cpu, int regno,
+ uint32_t value)
+{
+ /* Write the Nonsecure view of GICC_APR<regno>. */
+ switch (GIC_MIN_BPR) {
+ case 0:
+ if (regno < 2) {
+ s->nsapr[regno + 2][cpu] = value;
+ }
+ break;
+ case 1:
+ if (regno == 0) {
+ s->nsapr[regno + 1][cpu] = value;
+ }
+ break;
+ case 2:
+ if (regno == 0) {
+ s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value);
+ }
+ break;
+ case 3:
+ if (regno == 0) {
+ s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case 0x00: /* Control */
+ *data = gic_get_cpu_control(s, cpu, attrs);
+ break;
+ case 0x04: /* Priority mask */
+ *data = gic_get_priority_mask(s, cpu, attrs);
+ break;
+ case 0x08: /* Binary Point */
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
+ if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) {
+ /* NS view of BPR when CBPR is 1 */
+ *data = MIN(s->bpr[cpu] + 1, 7);
+ } else {
+ /* BPR is banked. Non-secure copy stored in ABPR. */
+ *data = s->abpr[cpu];
+ }
+ } else {
+ *data = s->bpr[cpu];
+ }
+ break;
+ case 0x0c: /* Acknowledge */
+ *data = gic_acknowledge_irq(s, cpu, attrs);
+ break;
+ case 0x14: /* Running Priority */
+ *data = gic_get_running_priority(s, cpu, attrs);
+ break;
+ case 0x18: /* Highest Pending Interrupt */
+ *data = gic_get_current_pending_irq(s, cpu, attrs);
+ break;
+ case 0x1c: /* Aliased Binary Point */
+ /* GIC v2, no security: ABPR
+ * GIC v1, no security: not implemented (RAZ/WI)
+ * With security extensions, secure access: ABPR (alias of NS BPR)
+ * With security extensions, nonsecure access: RAZ/WI
+ */
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
+ *data = 0;
+ } else {
+ *data = s->abpr[cpu];
+ }
+ break;
+ case 0xd0: case 0xd4: case 0xd8: case 0xdc:
+ {
+ int regno = (offset - 0xd0) / 4;
+ int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS;
+
+ if (regno >= nr_aprs || s->revision != 2) {
+ *data = 0;
+ } else if (gic_is_vcpu(cpu)) {
+ *data = s->h_apr[gic_get_vcpu_real_id(cpu)];
+ } else if (gic_cpu_ns_access(s, cpu, attrs)) {
+ /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
+ *data = gic_apr_ns_view(s, cpu, regno);
+ } else {
+ *data = s->apr[regno][cpu];
+ }
+ break;
+ }
+ case 0xe0: case 0xe4: case 0xe8: case 0xec:
+ {
+ int regno = (offset - 0xe0) / 4;
+
+ if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) ||
+ gic_cpu_ns_access(s, cpu, attrs) || gic_is_vcpu(cpu)) {
+ *data = 0;
+ } else {
+ *data = s->nsapr[regno][cpu];
+ }
+ break;
+ }
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_cpu_read: Bad offset %x\n", (int)offset);
+ *data = 0;
+ break;
+ }
+
+ trace_gic_cpu_read(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), offset, *data);
+ return MEMTX_OK;
+}
+
+static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
+ uint32_t value, MemTxAttrs attrs)
+{
+ trace_gic_cpu_write(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), offset, value);
+
+ switch (offset) {
+ case 0x00: /* Control */
+ gic_set_cpu_control(s, cpu, value, attrs);
+ break;
+ case 0x04: /* Priority mask */
+ gic_set_priority_mask(s, cpu, value, attrs);
+ break;
+ case 0x08: /* Binary Point */
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
+ if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) {
+ /* WI when CBPR is 1 */
+ return MEMTX_OK;
+ } else {
+ s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
+ }
+ } else {
+ int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
+ s->bpr[cpu] = MAX(value & 0x7, min_bpr);
+ }
+ break;
+ case 0x10: /* End Of Interrupt */
+ gic_complete_irq(s, cpu, value & 0x3ff, attrs);
+ return MEMTX_OK;
+ case 0x1c: /* Aliased Binary Point */
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
+ /* unimplemented, or NS access: RAZ/WI */
+ return MEMTX_OK;
+ } else {
+ s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
+ }
+ break;
+ case 0xd0: case 0xd4: case 0xd8: case 0xdc:
+ {
+ int regno = (offset - 0xd0) / 4;
+ int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS;
+
+ if (regno >= nr_aprs || s->revision != 2) {
+ return MEMTX_OK;
+ }
+ if (gic_is_vcpu(cpu)) {
+ s->h_apr[gic_get_vcpu_real_id(cpu)] = value;
+ } else if (gic_cpu_ns_access(s, cpu, attrs)) {
+ /* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
+ gic_apr_write_ns_view(s, cpu, regno, value);
+ } else {
+ s->apr[regno][cpu] = value;
+ }
+ break;
+ }
+ case 0xe0: case 0xe4: case 0xe8: case 0xec:
+ {
+ int regno = (offset - 0xe0) / 4;
+
+ if (regno >= GIC_NR_APRS || s->revision != 2) {
+ return MEMTX_OK;
+ }
+ if (gic_is_vcpu(cpu)) {
+ return MEMTX_OK;
+ }
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
+ return MEMTX_OK;
+ }
+ s->nsapr[regno][cpu] = value;
+ break;
+ }
+ case 0x1000:
+ /* GICC_DIR */
+ gic_deactivate_irq(s, cpu, value & 0x3ff, attrs);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_cpu_write: Bad offset %x\n", (int)offset);
+ return MEMTX_OK;
+ }
+
+ if (gic_is_vcpu(cpu)) {
+ gic_update_virt(s);
+ } else {
+ gic_update(s);
+ }
+
+ return MEMTX_OK;
+}
+
+/* Wrappers to read/write the GIC CPU interface for the current CPU */
+static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+ return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs);
+}
+
+static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+ return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs);
+}
+
+/* Wrappers to read/write the GIC CPU interface for a specific CPU.
+ * These just decode the opaque pointer into GICState* + cpu id.
+ */
+static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState **backref = (GICState **)opaque;
+ GICState *s = *backref;
+ int id = (backref - s->backref);
+ return gic_cpu_read(s, id, addr, data, attrs);
+}
+
+static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState **backref = (GICState **)opaque;
+ GICState *s = *backref;
+ int id = (backref - s->backref);
+ return gic_cpu_write(s, id, addr, value, attrs);
+}
+
+static MemTxResult gic_thisvcpu_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_cpu_read(s, gic_get_current_vcpu(s), addr, data, attrs);
+}
+
+static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs);
+}
+
+static uint32_t gic_compute_eisr(GICState *s, int cpu, int lr_start)
+{
+ int lr_idx;
+ uint32_t ret = 0;
+
+ for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+ ret = deposit32(ret, lr_idx - lr_start, 1,
+ gic_lr_entry_is_eoi(*entry));
+ }
+
+ return ret;
+}
+
+static uint32_t gic_compute_elrsr(GICState *s, int cpu, int lr_start)
+{
+ int lr_idx;
+ uint32_t ret = 0;
+
+ for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+ ret = deposit32(ret, lr_idx - lr_start, 1,
+ gic_lr_entry_is_free(*entry));
+ }
+
+ return ret;
+}
+
+static void gic_vmcr_write(GICState *s, uint32_t value, MemTxAttrs attrs)
+{
+ int vcpu = gic_get_current_vcpu(s);
+ uint32_t ctlr;
+ uint32_t abpr;
+ uint32_t bpr;
+ uint32_t prio_mask;
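+ /* GICH_VMCR packs the virtual CPU interface state; VMPriMask holds only
+ * the top 5 bits of the priority mask, hence the shift by 3 below.
+ */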
+
+ ctlr = FIELD_EX32(value, GICH_VMCR, VMCCtlr);
+ abpr = FIELD_EX32(value, GICH_VMCR, VMABP);
+ bpr = FIELD_EX32(value, GICH_VMCR, VMBP);
+ prio_mask = FIELD_EX32(value, GICH_VMCR, VMPriMask) << 3;
+
+ gic_set_cpu_control(s, vcpu, ctlr, attrs);
+ s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR);
+ s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR);
+ gic_set_priority_mask(s, vcpu, prio_mask, attrs);
+}
+
+static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ GICState *s = ARM_GIC(opaque);
+ int vcpu = cpu + GIC_NCPU;
+
+ switch (addr) {
+ case A_GICH_HCR: /* Hypervisor Control */
+ *data = s->h_hcr[cpu];
+ break;
+
+ case A_GICH_VTR: /* VGIC Type */
+ *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1);
+ *data = FIELD_DP32(*data, GICH_VTR, PREbits,
+ GIC_VIRT_MAX_GROUP_PRIO_BITS - 1);
+ *data = FIELD_DP32(*data, GICH_VTR, PRIbits,
+ (7 - GIC_VIRT_MIN_BPR) - 1);
+ break;
+
+ case A_GICH_VMCR: /* Virtual Machine Control */
+ *data = FIELD_DP32(0, GICH_VMCR, VMCCtlr,
+ extract32(s->cpu_ctlr[vcpu], 0, 10));
+ *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]);
+ *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]);
+ *data = FIELD_DP32(*data, GICH_VMCR, VMPriMask,
+ extract32(s->priority_mask[vcpu], 3, 5));
+ break;
+
+ case A_GICH_MISR: /* Maintenance Interrupt Status */
+ *data = s->h_misr[cpu];
+ break;
+
+ case A_GICH_EISR0: /* End of Interrupt Status 0 and 1 */
+ case A_GICH_EISR1:
+ *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8);
+ break;
+
+ case A_GICH_ELRSR0: /* Empty List Status 0 and 1 */
+ case A_GICH_ELRSR1:
+ *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8);
+ break;
+
+ case A_GICH_APR: /* Active Priorities */
+ *data = s->h_apr[cpu];
+ break;
+
+ case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */
+ {
+ int lr_idx = (addr - A_GICH_LR0) / 4;
+
+ if (lr_idx >= s->num_lrs) {
+ *data = 0;
+ } else {
+ *data = s->h_lr[lr_idx][cpu];
+ }
+ break;
+ }
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_hyp_read: Bad offset %" HWADDR_PRIx "\n", addr);
+ return MEMTX_OK;
+ }
+
+ trace_gic_hyp_read(addr, *data);
+ return MEMTX_OK;
+}
+
+static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr,
+ uint64_t value, MemTxAttrs attrs)
+{
+ GICState *s = ARM_GIC(opaque);
+ int vcpu = cpu + GIC_NCPU;
+
+ trace_gic_hyp_write(addr, value);
+
+ switch (addr) {
+ case A_GICH_HCR: /* Hypervisor Control */
+ s->h_hcr[cpu] = value & GICH_HCR_MASK;
+ break;
+
+ case A_GICH_VMCR: /* Virtual Machine Control */
+ gic_vmcr_write(s, value, attrs);
+ break;
+
+ case A_GICH_APR: /* Active Priorities */
+ s->h_apr[cpu] = value;
+ s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu);
+ break;
+
+ case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */
+ {
+ int lr_idx = (addr - A_GICH_LR0) / 4;
+
+ if (lr_idx >= s->num_lrs) {
+ return MEMTX_OK;
+ }
+
+ s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK;
+ trace_gic_lr_entry(cpu, lr_idx, s->h_lr[lr_idx][cpu]);
+ break;
+ }
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_hyp_write: Bad offset %" HWADDR_PRIx "\n", addr);
+ return MEMTX_OK;
+ }
+
+ gic_update_virt(s);
+ return MEMTX_OK;
+}
+
+static MemTxResult gic_thiscpu_hyp_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_hyp_read(s, gic_get_current_cpu(s), addr, data, attrs);
+}
+
+static MemTxResult gic_thiscpu_hyp_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_hyp_write(s, gic_get_current_cpu(s), addr, value, attrs);
+}
+
+static MemTxResult gic_do_hyp_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState **backref = (GICState **)opaque;
+ GICState *s = *backref;
+ int id = (backref - s->backref);
+
+ return gic_hyp_read(s, id, addr, data, attrs);
+}
+
+static MemTxResult gic_do_hyp_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState **backref = (GICState **)opaque;
+ GICState *s = *backref;
+ int id = (backref - s->backref);
+
+ return gic_hyp_write(s, id, addr, value, attrs);
+}
+
+static const MemoryRegionOps gic_ops[2] = {
+ {
+ .read_with_attrs = gic_dist_read,
+ .write_with_attrs = gic_dist_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ },
+ {
+ .read_with_attrs = gic_thiscpu_read,
+ .write_with_attrs = gic_thiscpu_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ }
+};
+
+static const MemoryRegionOps gic_cpu_ops = {
+ .read_with_attrs = gic_do_cpu_read,
+ .write_with_attrs = gic_do_cpu_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const MemoryRegionOps gic_virt_ops[2] = {
+ {
+ .read_with_attrs = gic_thiscpu_hyp_read,
+ .write_with_attrs = gic_thiscpu_hyp_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ },
+ {
+ .read_with_attrs = gic_thisvcpu_read,
+ .write_with_attrs = gic_thisvcpu_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ }
+};
+
+static const MemoryRegionOps gic_viface_ops = {
+ .read_with_attrs = gic_do_hyp_read,
+ .write_with_attrs = gic_do_hyp_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void arm_gic_realize(DeviceState *dev, Error **errp)
+{
+ /* Device instance realize function for the GIC sysbus device */
+ int i;
+ GICState *s = ARM_GIC(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ ARMGICClass *agc = ARM_GIC_GET_CLASS(s);
+ Error *local_err = NULL;
+
+ agc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (kvm_enabled() && !kvm_arm_supports_user_irq()) {
+ error_setg(errp, "KVM with user space irqchip only works when the "
+ "host kernel supports KVM_CAP_ARM_USER_IRQ");
+ return;
+ }
+
+ if (s->n_prio_bits > GIC_MAX_PRIORITY_BITS ||
+ (s->virt_extn ? s->n_prio_bits < GIC_VIRT_MAX_GROUP_PRIO_BITS :
+ s->n_prio_bits < GIC_MIN_PRIORITY_BITS)) {
+ error_setg(errp, "num-priority-bits cannot be greater than %d"
+ " or less than %d", GIC_MAX_PRIORITY_BITS,
+ s->virt_extn ? GIC_VIRT_MAX_GROUP_PRIO_BITS :
+ GIC_MIN_PRIORITY_BITS);
+ return;
+ }
+
+ /* This creates distributor, main CPU interface (s->cpuiomem[0]) and if
+ * enabled, virtualization extensions related interfaces (main virtual
+ * interface (s->vifaceiomem[0]) and virtual CPU interface).
+ */
+ gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, gic_virt_ops);
+
+ /* Extra core-specific regions for the CPU interfaces. This is
+ * necessary for "franken-GIC" implementations, for example on
+ * Exynos 4.
+ * NB that the memory region size of 0x100 applies for the 11MPCore
+ * and also cores following the GIC v1 spec (ie A9).
+ * GIC v2 defines a larger memory region (0x1000) so this will need
+ * to be extended when we implement A15.
+ */
+ for (i = 0; i < s->num_cpu; i++) {
+ s->backref[i] = s;
+ memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops,
+ &s->backref[i], "gic_cpu", 0x100);
+ sysbus_init_mmio(sbd, &s->cpuiomem[i+1]);
+ }
+
+ /* Extra core-specific regions for virtual interfaces. This is required by
+ * the GICv2 specification.
+ */
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_cpu; i++) {
+ memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s),
+ &gic_viface_ops, &s->backref[i],
+ "gic_viface", 0x200);
+ sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]);
+ }
+ }
+
+}
+
+static void arm_gic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ARMGICClass *agc = ARM_GIC_CLASS(klass);
+
+ device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
+}
+
+static const TypeInfo arm_gic_info = {
+ .name = TYPE_ARM_GIC,
+ .parent = TYPE_ARM_GIC_COMMON,
+ .instance_size = sizeof(GICState),
+ .class_init = arm_gic_class_init,
+ .class_size = sizeof(ARMGICClass),
+};
+
+static void arm_gic_register_types(void)
+{
+ type_register_static(&arm_gic_info);
+}
+
+type_init(arm_gic_register_types)
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
new file mode 100644
index 000000000..7b44d5625
--- /dev/null
+++ b/hw/intc/arm_gic_common.c
@@ -0,0 +1,394 @@
+/*
+ * ARM GIC support - common bits of emulated and KVM kernel model
+ *
+ * Copyright (c) 2012 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "gic_internal.h"
+#include "hw/arm/linux-boot-if.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
+static int gic_pre_save(void *opaque)
+{
+ GICState *s = (GICState *)opaque;
+ ARMGICCommonClass *c = ARM_GIC_COMMON_GET_CLASS(s);
+
+ if (c->pre_save) {
+ c->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int gic_post_load(void *opaque, int version_id)
+{
+ GICState *s = (GICState *)opaque;
+ ARMGICCommonClass *c = ARM_GIC_COMMON_GET_CLASS(s);
+
+ if (c->post_load) {
+ c->post_load(s);
+ }
+ return 0;
+}
+
+static bool gic_virt_state_needed(void *opaque)
+{
+ GICState *s = (GICState *)opaque;
+
+ return s->virt_extn;
+}
+
+static const VMStateDescription vmstate_gic_irq_state = {
+ .name = "arm_gic_irq_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(enabled, gic_irq_state),
+ VMSTATE_UINT8(pending, gic_irq_state),
+ VMSTATE_UINT8(active, gic_irq_state),
+ VMSTATE_UINT8(level, gic_irq_state),
+ VMSTATE_BOOL(model, gic_irq_state),
+ VMSTATE_BOOL(edge_trigger, gic_irq_state),
+ VMSTATE_UINT8(group, gic_irq_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_gic_virt_state = {
+ .name = "arm_gic_virt_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = gic_virt_state_needed,
+ .fields = (VMStateField[]) {
+ /* Virtual interface */
+ VMSTATE_UINT32_ARRAY(h_hcr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_ARRAY(h_misr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_2DARRAY(h_lr, GICState, GIC_MAX_LR, GIC_NCPU),
+ VMSTATE_UINT32_ARRAY(h_apr, GICState, GIC_NCPU),
+
+ /* Virtual CPU interfaces */
+ VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, GIC_NCPU, GIC_NCPU),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_gic = {
+ .name = "arm_gic",
+ .version_id = 12,
+ .minimum_version_id = 12,
+ .pre_save = gic_pre_save,
+ .post_load = gic_post_load,
+ .fields = (VMStateField[]) {
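+ /* Only the physical-CPU halves of the banked per-CPU arrays are stored
+ * here; the vCPU halves (indexes GIC_NCPU and up) are migrated in the
+ * vmstate_gic_virt_state subsection above.
+ */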
+ VMSTATE_UINT32(ctlr, GICState),
+ VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, 0, GIC_NCPU),
+ VMSTATE_STRUCT_ARRAY(irq_state, GICState, GIC_MAXIRQ, 1,
+ vmstate_gic_irq_state, gic_irq_state),
+ VMSTATE_UINT8_ARRAY(irq_target, GICState, GIC_MAXIRQ),
+ VMSTATE_UINT8_2DARRAY(priority1, GICState, GIC_INTERNAL, GIC_NCPU),
+ VMSTATE_UINT8_ARRAY(priority2, GICState, GIC_MAXIRQ - GIC_INTERNAL),
+ VMSTATE_UINT8_2DARRAY(sgi_pending, GICState, GIC_NR_SGIS, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT32_2DARRAY(apr, GICState, GIC_NR_APRS, GIC_NCPU),
+ VMSTATE_UINT32_2DARRAY(nsapr, GICState, GIC_NR_APRS, GIC_NCPU),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_gic_virt_state,
+ NULL
+ }
+};
+
+void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
+ const MemoryRegionOps *ops,
+ const MemoryRegionOps *virt_ops)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(s);
+ int i = s->num_irq - GIC_INTERNAL;
+
+ /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
+ * GPIO array layout is thus:
+ * [0..N-1] SPIs
+ * [N..N+31] PPIs for CPU 0
+ * [N+32..N+63] PPIs for CPU 1
+ * ...
+ */
+ i += (GIC_INTERNAL * s->num_cpu);
+ qdev_init_gpio_in(DEVICE(s), handler, i);
+
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->parent_irq[i]);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->parent_fiq[i]);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->parent_virq[i]);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->parent_vfiq[i]);
+ }
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->maintenance_irq[i]);
+ }
+ }
+
+ /* Distributor */
+ memory_region_init_io(&s->iomem, OBJECT(s), ops, s, "gic_dist", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ /* This is the main CPU interface "for this core". It is always
+ * present because it is required by both software emulation and KVM.
+ */
+ memory_region_init_io(&s->cpuiomem[0], OBJECT(s), ops ? &ops[1] : NULL,
+ s, "gic_cpu", s->revision == 2 ? 0x2000 : 0x100);
+ sysbus_init_mmio(sbd, &s->cpuiomem[0]);
+
+ if (s->virt_extn) {
+ memory_region_init_io(&s->vifaceiomem[0], OBJECT(s), virt_ops,
+ s, "gic_viface", 0x1000);
+ sysbus_init_mmio(sbd, &s->vifaceiomem[0]);
+
+ memory_region_init_io(&s->vcpuiomem, OBJECT(s),
+ virt_ops ? &virt_ops[1] : NULL,
+ s, "gic_vcpu", 0x2000);
+ sysbus_init_mmio(sbd, &s->vcpuiomem);
+ }
+}
+
+static void arm_gic_common_realize(DeviceState *dev, Error **errp)
+{
+ GICState *s = ARM_GIC_COMMON(dev);
+ int num_irq = s->num_irq;
+
+ if (s->num_cpu > GIC_NCPU) {
+ error_setg(errp, "requested %u CPUs exceeds GIC maximum %d",
+ s->num_cpu, GIC_NCPU);
+ return;
+ }
+ if (s->num_irq > GIC_MAXIRQ) {
+ error_setg(errp,
+ "requested %u interrupt lines exceeds GIC maximum %d",
+ num_irq, GIC_MAXIRQ);
+ return;
+ }
+ /* ITLinesNumber is represented as (N / 32) - 1 (see
+ * gic_dist_readb) so this is an implementation imposed
+ * restriction, not an architectural one:
+ */
+ if (s->num_irq < 32 || (s->num_irq % 32)) {
+ error_setg(errp,
+ "%d interrupt lines unsupported: not divisible by 32",
+ num_irq);
+ return;
+ }
+
+ if (s->security_extn &&
+ (s->revision == REV_11MPCORE)) {
+ error_setg(errp, "this GIC revision does not implement "
+ "the security extensions");
+ return;
+ }
+
+ if (s->virt_extn) {
+ if (s->revision != 2) {
+ error_setg(errp, "GIC virtualization extensions are only "
+ "supported by revision 2");
+ return;
+ }
+
+ /* For now, set the number of implemented LRs to 4, as found in most
+ * real GICv2. This could be promoted as a QOM property if we need to
+ * emulate a variant with another num_lrs.
+ */
+ s->num_lrs = 4;
+ }
+}
+
+static inline void arm_gic_common_reset_irq_state(GICState *s, int first_cpu,
+ int resetprio)
+{
+ int i, j;
+
+ for (i = first_cpu; i < first_cpu + s->num_cpu; i++) {
+ if (s->revision == REV_11MPCORE) {
+ s->priority_mask[i] = 0xf0;
+ } else {
+ s->priority_mask[i] = resetprio;
+ }
+ s->current_pending[i] = 1023;
+ s->running_priority[i] = 0x100;
+ s->cpu_ctlr[i] = 0;
+ s->bpr[i] = gic_is_vcpu(i) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
+ s->abpr[i] = gic_is_vcpu(i) ? GIC_VIRT_MIN_ABPR : GIC_MIN_ABPR;
+
+ if (!gic_is_vcpu(i)) {
+ for (j = 0; j < GIC_INTERNAL; j++) {
+ s->priority1[j][i] = resetprio;
+ }
+ for (j = 0; j < GIC_NR_SGIS; j++) {
+ s->sgi_pending[j][i] = 0;
+ }
+ }
+ }
+}
+
+static void arm_gic_common_reset(DeviceState *dev)
+{
+ GICState *s = ARM_GIC_COMMON(dev);
+ int i, j;
+ int resetprio;
+
+ /* If we're resetting a TZ-aware GIC as if secure firmware
+ * had set it up ready to start a kernel in non-secure,
+ * we need to set interrupt priorities to a "zero for the
+ * NS view" value. This is particularly critical for the
+ * priority_mask[] values, because if they are zero then NS
+ * code cannot ever rewrite the priority to anything else.
+ */
+ if (s->security_extn && s->irq_reset_nonsecure) {
+ resetprio = 0x80;
+ } else {
+ resetprio = 0;
+ }
+
+ memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state));
+ arm_gic_common_reset_irq_state(s, 0, resetprio);
+
+ if (s->virt_extn) {
+ /* vCPU states are stored at indexes GIC_NCPU .. GIC_NCPU+num_cpu.
+ * The exposed vCPU interface does not have security extensions.
+ */
+ arm_gic_common_reset_irq_state(s, GIC_NCPU, 0);
+ }
+
+ for (i = 0; i < GIC_NR_SGIS; i++) {
+ GIC_DIST_SET_ENABLED(i, ALL_CPU_MASK);
+ GIC_DIST_SET_EDGE_TRIGGER(i);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->priority2); i++) {
+ s->priority2[i] = resetprio;
+ }
+
+ for (i = 0; i < GIC_MAXIRQ; i++) {
+ /* For uniprocessor GICs all interrupts always target the sole CPU */
+ if (s->num_cpu == 1) {
+ s->irq_target[i] = 1;
+ } else {
+ s->irq_target[i] = 0;
+ }
+ }
+ if (s->security_extn && s->irq_reset_nonsecure) {
+ for (i = 0; i < GIC_MAXIRQ; i++) {
+ GIC_DIST_SET_GROUP(i, ALL_CPU_MASK);
+ }
+ }
+
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_lrs; i++) {
+ for (j = 0; j < s->num_cpu; j++) {
+ s->h_lr[i][j] = 0;
+ }
+ }
+
+ for (i = 0; i < s->num_cpu; i++) {
+ s->h_hcr[i] = 0;
+ s->h_misr[i] = 0;
+ }
+ }
+
+ s->ctlr = 0;
+}
+
+static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
+ bool secure_boot)
+{
+ GICState *s = ARM_GIC_COMMON(obj);
+
+ if (s->security_extn && !secure_boot) {
+ /* We're directly booting a kernel into NonSecure. If this GIC
+ * implements the security extensions then we must configure it
+ * to have all the interrupts be NonSecure (this is a job that
+ * is done by the Secure boot firmware in real hardware, and in
+ * this mode QEMU is acting as a minimalist firmware-and-bootloader
+ * equivalent).
+ */
+ s->irq_reset_nonsecure = true;
+ }
+}
+
+static Property arm_gic_common_properties[] = {
+ DEFINE_PROP_UINT32("num-cpu", GICState, num_cpu, 1),
+ DEFINE_PROP_UINT32("num-irq", GICState, num_irq, 32),
+ /* Revision can be 1 or 2 for GIC architecture specification
+ * versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC.
+ */
+ DEFINE_PROP_UINT32("revision", GICState, revision, 1),
+ /* True if the GIC should implement the security extensions */
+ DEFINE_PROP_BOOL("has-security-extensions", GICState, security_extn, 0),
+ /* True if the GIC should implement the virtualization extensions */
+ DEFINE_PROP_BOOL("has-virtualization-extensions", GICState, virt_extn, 0),
+ DEFINE_PROP_UINT32("num-priority-bits", GICState, n_prio_bits, 8),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void arm_gic_common_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);
+
+ dc->reset = arm_gic_common_reset;
+ dc->realize = arm_gic_common_realize;
+ device_class_set_props(dc, arm_gic_common_properties);
+ dc->vmsd = &vmstate_gic;
+ albifc->arm_linux_init = arm_gic_common_linux_init;
+}
+
+static const TypeInfo arm_gic_common_type = {
+ .name = TYPE_ARM_GIC_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(GICState),
+ .class_size = sizeof(ARMGICCommonClass),
+ .class_init = arm_gic_common_class_init,
+ .abstract = true,
+ .interfaces = (InterfaceInfo []) {
+ { TYPE_ARM_LINUX_BOOT_IF },
+ { },
+ },
+};
+
+static void register_types(void)
+{
+ type_register_static(&arm_gic_common_type);
+}
+
+type_init(register_types)
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
new file mode 100644
index 000000000..7d2a13273
--- /dev/null
+++ b/hw/intc/arm_gic_kvm.c
@@ -0,0 +1,619 @@
+/*
+ * ARM Generic Interrupt Controller using KVM in-kernel support
+ *
+ * Copyright (c) 2012 Linaro Limited
+ * Written by Peter Maydell
+ * Save/Restore logic added by Christoffer Dall.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "migration/blocker.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "gic_internal.h"
+#include "vgic_common.h"
+#include "qom/object.h"
+
+#define TYPE_KVM_ARM_GIC "kvm-arm-gic"
+typedef struct KVMARMGICClass KVMARMGICClass;
+/* This is reusing the GICState typedef from ARM_GIC_COMMON */
+DECLARE_OBJ_CHECKERS(GICState, KVMARMGICClass,
+ KVM_ARM_GIC, TYPE_KVM_ARM_GIC)
+
+struct KVMARMGICClass {
+ ARMGICCommonClass parent_class;
+ DeviceRealize parent_realize;
+ void (*parent_reset)(DeviceState *dev);
+};
+
+void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level)
+{
+ /* Meaning of the 'irq' parameter:
+ * [0..N-1] : external interrupts
+ * [N..N+31] : PPI (internal) interrupts for CPU 0
+ * [N+32..N+63] : PPI (internal) interrupts for CPU 1
+ * ...
+ * Convert this to the kernel's desired encoding, which
+ * has separate fields in the irq number for type,
+ * CPU number and interrupt number.
+ */
+ int irqtype, cpu;
+
+ if (irq < (num_irq - GIC_INTERNAL)) {
+ /* External interrupt. The kernel numbers these like the GIC
+ * hardware, with external interrupt IDs starting after the
+ * internal ones.
+ */
+ irqtype = KVM_ARM_IRQ_TYPE_SPI;
+ cpu = 0;
+ irq += GIC_INTERNAL;
+ } else {
+ /* Internal interrupt: decode into (cpu, interrupt id) */
+ irqtype = KVM_ARM_IRQ_TYPE_PPI;
+ irq -= (num_irq - GIC_INTERNAL);
+ cpu = irq / GIC_INTERNAL;
+ irq %= GIC_INTERNAL;
+ }
+ kvm_arm_set_irq(cpu, irqtype, irq, !!level);
+}
+
+static void kvm_arm_gicv2_set_irq(void *opaque, int irq, int level)
+{
+ GICState *s = (GICState *)opaque;
+
+ kvm_arm_gic_set_irq(s->num_irq, irq, level);
+}
+
+static bool kvm_arm_gic_can_save_restore(GICState *s)
+{
+ return s->dev_fd >= 0;
+}
+
+#define KVM_VGIC_ATTR(offset, cpu) \
+ ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \
+ KVM_DEV_ARM_VGIC_CPUID_MASK) | \
+ (((uint64_t)(offset) << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & \
+ KVM_DEV_ARM_VGIC_OFFSET_MASK))
+
+static void kvm_gicd_access(GICState *s, int offset, int cpu,
+ uint32_t *val, bool write)
+{
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+ KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort);
+}
+
+static void kvm_gicc_access(GICState *s, int offset, int cpu,
+ uint32_t *val, bool write)
+{
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+ KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort);
+}
+
+#define for_each_irq_reg(_ctr, _max_irq, _field_width) \
+ for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++)
+
+/*
+ * Translate from the in-kernel field for an IRQ value to/from the qemu
+ * representation.
+ */
+typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel);
+
+/* synthetic translate function used for clear/set registers to completely
+ * clear a setting using a clear-register before setting the remaining bits
+ * using a set-register */
+static void translate_clear(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ if (to_kernel) {
+ *field = ~0;
+ } else {
+ /* does not make sense: qemu model doesn't use set/clear regs */
+ abort();
+ }
+}
+
+static void translate_group(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+ if (to_kernel) {
+ *field = GIC_DIST_TEST_GROUP(irq, cm);
+ } else {
+ if (*field & 1) {
+ GIC_DIST_SET_GROUP(irq, cm);
+ }
+ }
+}
+
+static void translate_enabled(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+ if (to_kernel) {
+ *field = GIC_DIST_TEST_ENABLED(irq, cm);
+ } else {
+ if (*field & 1) {
+ GIC_DIST_SET_ENABLED(irq, cm);
+ }
+ }
+}
+
+static void translate_pending(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+ if (to_kernel) {
+ *field = gic_test_pending(s, irq, cm);
+ } else {
+ if (*field & 1) {
+ GIC_DIST_SET_PENDING(irq, cm);
+ /* TODO: Capture if level-line is held high in the kernel */
+ }
+ }
+}
+
+static void translate_active(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+ if (to_kernel) {
+ *field = GIC_DIST_TEST_ACTIVE(irq, cm);
+ } else {
+ if (*field & 1) {
+ GIC_DIST_SET_ACTIVE(irq, cm);
+ }
+ }
+}
+
+static void translate_trigger(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ if (to_kernel) {
+ *field = (GIC_DIST_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
+ } else {
+ if (*field & 0x2) {
+ GIC_DIST_SET_EDGE_TRIGGER(irq);
+ }
+ }
+}
+
+static void translate_priority(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ if (to_kernel) {
+ *field = GIC_DIST_GET_PRIORITY(irq, cpu) & 0xff;
+ } else {
+ gic_dist_set_priority(s, cpu, irq,
+ *field & 0xff, MEMTXATTRS_UNSPECIFIED);
+ }
+}
+
+static void translate_targets(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ if (to_kernel) {
+ *field = s->irq_target[irq] & 0xff;
+ } else {
+ s->irq_target[irq] = *field & 0xff;
+ }
+}
+
+static void translate_sgisource(GICState *s, int irq, int cpu,
+ uint32_t *field, bool to_kernel)
+{
+ if (to_kernel) {
+ *field = s->sgi_pending[irq][cpu] & 0xff;
+ } else {
+ s->sgi_pending[irq][cpu] = *field & 0xff;
+ }
+}
+
+/* Read a register group from the kernel VGIC */
+static void kvm_dist_get(GICState *s, uint32_t offset, int width,
+ int maxirq, vgic_translate_fn translate_fn)
+{
+ uint32_t reg;
+ int i;
+ int j;
+ int irq;
+ int cpu;
+ int regsz = 32 / width; /* irqs per kernel register */
+ uint32_t field;
+
+ for_each_irq_reg(i, maxirq, width) {
+ irq = i * regsz;
+ cpu = 0;
+ while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
+ kvm_gicd_access(s, offset, cpu, &reg, false);
+ for (j = 0; j < regsz; j++) {
+ field = extract32(reg, j * width, width);
+ translate_fn(s, irq + j, cpu, &field, false);
+ }
+
+ cpu++;
+ }
+ offset += 4;
+ }
+}
+
+/* Write a register group to the kernel VGIC */
+static void kvm_dist_put(GICState *s, uint32_t offset, int width,
+ int maxirq, vgic_translate_fn translate_fn)
+{
+ uint32_t reg;
+ int i;
+ int j;
+ int irq;
+ int cpu;
+ int regsz = 32 / width; /* irqs per kernel register */
+ uint32_t field;
+
+ for_each_irq_reg(i, maxirq, width) {
+ irq = i * regsz;
+ cpu = 0;
+ while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
+ reg = 0;
+ for (j = 0; j < regsz; j++) {
+ translate_fn(s, irq + j, cpu, &field, true);
+ reg = deposit32(reg, j * width, width, field);
+ }
+ kvm_gicd_access(s, offset, cpu, &reg, true);
+
+ cpu++;
+ }
+ offset += 4;
+ }
+}
+
+static void kvm_arm_gic_put(GICState *s)
+{
+ uint32_t reg;
+ int i;
+ int cpu;
+ int num_cpu;
+ int num_irq;
+
+ /* Note: We do the restore in a slightly different order than the save
+ * (where the order doesn't matter and is simply ordered according to the
+ * register offset values) */
+
+ /*****************************************************************
+ * Distributor State
+ */
+
+ /* s->ctlr -> GICD_CTLR */
+ reg = s->ctlr;
+ kvm_gicd_access(s, 0x0, 0, &reg, true);
+
+ /* Sanity checking on GICD_TYPER and s->num_irq, s->num_cpu */
+ kvm_gicd_access(s, 0x4, 0, &reg, false);
+ num_irq = ((reg & 0x1f) + 1) * 32;
+ num_cpu = ((reg & 0xe0) >> 5) + 1;
+
+ if (num_irq < s->num_irq) {
+ fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n",
+ s->num_irq, num_irq);
+ abort();
+ } else if (num_cpu != s->num_cpu) {
+ fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n",
+ s->num_cpu, num_cpu);
+ /* Did we not create the VCPUs in the kernel yet? */
+ abort();
+ }
+
+ /* TODO: Consider checking compatibility with the IIDR ? */
+
+ /* irq_state[n].enabled -> GICD_ISENABLERn */
+ kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear);
+ kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled);
+
+ /* irq_state[n].group -> GICD_IGROUPRn */
+ kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group);
+
+ /* s->irq_target[irq] -> GICD_ITARGETSRn
+ * (restore targets before pending to ensure the pending state is set on
+ * the appropriate CPU interfaces in the kernel) */
+ kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets);
+
+ /* irq_state[n].trigger -> GICD_ICFGRn
+ * (restore configuration registers before pending IRQs so we treat
+ * level/edge correctly) */
+ kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger);
+
+ /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */
+ kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear);
+ kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending);
+
+ /* irq_state[n].active -> GICD_ISACTIVERn */
+ kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear);
+ kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active);
+
+
+ /* s->priorityX[irq] -> ICD_IPRIORITYRn */
+ kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority);
+
+ /* s->sgi_pending -> ICD_CPENDSGIRn */
+ kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear);
+ kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource);
+
+
+ /*****************************************************************
+ * CPU Interface(s) State
+ */
+
+ for (cpu = 0; cpu < s->num_cpu; cpu++) {
+ /* s->cpu_ctlr[cpu] -> GICC_CTLR */
+ reg = s->cpu_ctlr[cpu];
+ kvm_gicc_access(s, 0x00, cpu, &reg, true);
+
+ /* s->priority_mask[cpu] -> GICC_PMR */
+ reg = (s->priority_mask[cpu] & 0xff);
+ kvm_gicc_access(s, 0x04, cpu, &reg, true);
+
+ /* s->bpr[cpu] -> GICC_BPR */
+ reg = (s->bpr[cpu] & 0x7);
+ kvm_gicc_access(s, 0x08, cpu, &reg, true);
+
+ /* s->abpr[cpu] -> GICC_ABPR */
+ reg = (s->abpr[cpu] & 0x7);
+ kvm_gicc_access(s, 0x1c, cpu, &reg, true);
+
+ /* s->apr[n][cpu] -> GICC_APRn */
+ for (i = 0; i < 4; i++) {
+ reg = s->apr[i][cpu];
+ kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, true);
+ }
+ }
+}
+
+static void kvm_arm_gic_get(GICState *s)
+{
+ uint32_t reg;
+ int i;
+ int cpu;
+
+ /*****************************************************************
+ * Distributor State
+ */
+
+ /* GICD_CTLR -> s->ctlr */
+ kvm_gicd_access(s, 0x0, 0, &reg, false);
+ s->ctlr = reg;
+
+ /* Sanity checking on GICD_TYPER -> s->num_irq, s->num_cpu */
+ kvm_gicd_access(s, 0x4, 0, &reg, false);
+ s->num_irq = ((reg & 0x1f) + 1) * 32;
+ s->num_cpu = ((reg & 0xe0) >> 5) + 1;
+
+ if (s->num_irq > GIC_MAXIRQ) {
+ fprintf(stderr, "Too many IRQs reported from the kernel: %d\n",
+ s->num_irq);
+ abort();
+ }
+
+ /* GICD_IIDR -> ? */
+ kvm_gicd_access(s, 0x8, 0, &reg, false);
+
+ /* Clear all the IRQ settings */
+ for (i = 0; i < s->num_irq; i++) {
+ memset(&s->irq_state[i], 0, sizeof(s->irq_state[0]));
+ }
+
+ /* GICD_IGROUPRn -> irq_state[n].group */
+ kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group);
+
+ /* GICD_ISENABLERn -> irq_state[n].enabled */
+ kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled);
+
+ /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */
+ kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending);
+
+ /* GICD_ISACTIVERn -> irq_state[n].active */
+ kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active);
+
+ /* GICD_ICFRn -> irq_state[n].trigger */
+ kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger);
+
+ /* GICD_IPRIORITYRn -> s->priorityX[irq] */
+ kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority);
+
+ /* GICD_ITARGETSRn -> s->irq_target[irq] */
+ kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets);
+
+ /* GICD_CPENDSGIRn -> s->sgi_pending */
+ kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource);
+
+
+ /*****************************************************************
+ * CPU Interface(s) State
+ */
+
+ for (cpu = 0; cpu < s->num_cpu; cpu++) {
+ /* GICC_CTLR -> s->cpu_ctlr[cpu] */
+ kvm_gicc_access(s, 0x00, cpu, &reg, false);
+ s->cpu_ctlr[cpu] = reg;
+
+ /* GICC_PMR -> s->priority_mask[cpu] */
+ kvm_gicc_access(s, 0x04, cpu, &reg, false);
+ s->priority_mask[cpu] = (reg & 0xff);
+
+ /* GICC_BPR -> s->bpr[cpu] */
+ kvm_gicc_access(s, 0x08, cpu, &reg, false);
+ s->bpr[cpu] = (reg & 0x7);
+
+ /* GICC_ABPR -> s->abpr[cpu] */
+ kvm_gicc_access(s, 0x1c, cpu, &reg, false);
+ s->abpr[cpu] = (reg & 0x7);
+
+ /* GICC_APRn -> s->apr[n][cpu] */
+ for (i = 0; i < 4; i++) {
+ kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, false);
+ s->apr[i][cpu] = reg;
+ }
+ }
+}
+
+static void kvm_arm_gic_reset(DeviceState *dev)
+{
+ GICState *s = ARM_GIC_COMMON(dev);
+ KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
+
+ kgc->parent_reset(dev);
+
+ if (kvm_arm_gic_can_save_restore(s)) {
+ kvm_arm_gic_put(s);
+ }
+}
+
+static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
+{
+ int i;
+ GICState *s = KVM_ARM_GIC(dev);
+ KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
+ Error *local_err = NULL;
+ int ret;
+
+ kgc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (s->security_extn) {
+ error_setg(errp, "the in-kernel VGIC does not implement the "
+ "security extensions");
+ return;
+ }
+
+ if (s->virt_extn) {
+ error_setg(errp, "the in-kernel VGIC does not implement the "
+ "virtualization extensions");
+ return;
+ }
+
+ if (!kvm_arm_gic_can_save_restore(s)) {
+ error_setg(&s->migration_blocker, "This operating system kernel does "
+ "not support vGICv2 migration");
+ if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
+ error_free(s->migration_blocker);
+ return;
+ }
+ }
+
+ gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL, NULL);
+
+ for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
+ qemu_irq irq = qdev_get_gpio_in(dev, i);
+ kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i);
+ }
+
+ /* Try to create the device via the device control API */
+ s->dev_fd = -1;
+ ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false);
+ if (ret >= 0) {
+ s->dev_fd = ret;
+
+ /* New-style API is used, so we may have attributes */
+ if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) {
+ uint32_t numirqs = s->num_irq;
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
+ &numirqs, true, &error_abort);
+ }
+ /* Tell the kernel to complete VGIC initialization now */
+ if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT)) {
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true,
+ &error_abort);
+ }
+ } else if (kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
+ error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
+ error_append_hint(errp,
+ "Perhaps the host CPU does not support GICv2?\n");
+ } else if (ret != -ENODEV && ret != -ENOTSUP) {
+ /*
+ * Very ancient kernel without KVM_CAP_DEVICE_CTRL: assume that
+ * ENODEV or ENOTSUP mean "can't create GICv2 with KVM_CREATE_DEVICE",
+ * and that we will get a GICv2 via KVM_CREATE_IRQCHIP.
+ */
+ error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
+ return;
+ }
+
+ /* Distributor */
+ kvm_arm_register_device(&s->iomem,
+ (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
+ | KVM_VGIC_V2_ADDR_TYPE_DIST,
+ KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V2_ADDR_TYPE_DIST,
+ s->dev_fd, 0);
+ /* CPU interface for current core. Unlike arm_gic, we don't
+ * provide the "interface for core #N" memory regions, because
+ * cores with a VGIC don't have those.
+ */
+ kvm_arm_register_device(&s->cpuiomem[0],
+ (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
+ | KVM_VGIC_V2_ADDR_TYPE_CPU,
+ KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V2_ADDR_TYPE_CPU,
+ s->dev_fd, 0);
+
+ if (kvm_has_gsi_routing()) {
+ /* set up irq routing */
+ for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
+ kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+ }
+
+ kvm_gsi_routing_allowed = true;
+
+ kvm_irqchip_commit_routes(kvm_state);
+ }
+}
+
+static void kvm_arm_gic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass);
+ KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass);
+
+ agcc->pre_save = kvm_arm_gic_get;
+ agcc->post_load = kvm_arm_gic_put;
+ device_class_set_parent_realize(dc, kvm_arm_gic_realize,
+ &kgc->parent_realize);
+ device_class_set_parent_reset(dc, kvm_arm_gic_reset, &kgc->parent_reset);
+}
+
+static const TypeInfo kvm_arm_gic_info = {
+ .name = TYPE_KVM_ARM_GIC,
+ .parent = TYPE_ARM_GIC_COMMON,
+ .instance_size = sizeof(GICState),
+ .class_init = kvm_arm_gic_class_init,
+ .class_size = sizeof(KVMARMGICClass),
+};
+
+static void kvm_arm_gic_register_types(void)
+{
+ type_register_static(&kvm_arm_gic_info);
+}
+
+type_init(kvm_arm_gic_register_types)
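
The kvm_dist_get()/kvm_dist_put() helpers above walk the distributor register groups 32 bits at a time, packing one field of `width` bits per interrupt into each register. Below is a self-contained sketch (plain C, not QEMU code) of the offset/bit arithmetic they rely on; the base offsets are the GICv2 distributor ones used in kvm_arm_gic_put()/get().

    #include <stdint.h>
    #include <stdio.h>

    /* For a register group where each interrupt uses 'width' bits
     * (1 for enable/pending/active, 2 for config, 8 for priority/targets),
     * locate IRQ n's field: which 32-bit register and which bit position. */
    static void locate_field(uint32_t base, int width, int irq,
                             uint32_t *reg_offset, int *bit_pos)
    {
        int per_reg = 32 / width;              /* IRQs per 32-bit register */
        *reg_offset = base + 4 * (irq / per_reg);
        *bit_pos = (irq % per_reg) * width;
    }

    int main(void)
    {
        uint32_t off;
        int bit;

        /* Priority (8-bit fields) for IRQ 37 in GICD_IPRIORITYRn at 0x400 */
        locate_field(0x400, 8, 37, &off, &bit);
        printf("IRQ 37 priority: offset 0x%x, bits [%d:%d]\n", off, bit + 7, bit);

        /* Enable (1-bit fields) for IRQ 37 in GICD_ISENABLERn at 0x100 */
        locate_field(0x100, 1, 37, &off, &bit);
        printf("IRQ 37 enable:   offset 0x%x, bit %d\n", off, bit);
        return 0;
    }

Compiled standalone this prints offset 0x424, bits [15:8] for the priority of IRQ 37 and offset 0x104, bit 5 for its enable bit, which is the layout the translate functions above operate on.
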
diff --git a/hw/intc/arm_gicv2m.c b/hw/intc/arm_gicv2m.c
new file mode 100644
index 000000000..d564b857e
--- /dev/null
+++ b/hw/intc/arm_gicv2m.c
@@ -0,0 +1,200 @@
+/*
+ * GICv2m extension for MSI/MSI-x support with a GICv2-based system
+ *
+ * Copyright (C) 2015 Linaro, All rights reserved.
+ *
+ * Author: Christoffer Dall <christoffer.dall@linaro.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* This file implements an emulated GICv2m widget as described in the ARM
+ * Server Base System Architecture (SBSA) specification Version 2.2
+ * (ARM-DEN-0029 v2.2) pages 35-39 without any optional implementation defined
+ * identification registers and with a single non-secure MSI register frame.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "hw/pci/msi.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/kvm.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+
+#define TYPE_ARM_GICV2M "arm-gicv2m"
+OBJECT_DECLARE_SIMPLE_TYPE(ARMGICv2mState, ARM_GICV2M)
+
+#define GICV2M_NUM_SPI_MAX 128
+
+#define V2M_MSI_TYPER 0x008
+#define V2M_MSI_SETSPI_NS 0x040
+#define V2M_MSI_IIDR 0xFCC
+#define V2M_IIDR0 0xFD0
+#define V2M_IIDR11 0xFFC
+
+#define PRODUCT_ID_QEMU 0x51 /* ASCII code Q */
+
+struct ARMGICv2mState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq spi[GICV2M_NUM_SPI_MAX];
+
+ uint32_t base_spi;
+ uint32_t num_spi;
+};
+
+static void gicv2m_set_irq(void *opaque, int irq)
+{
+ ARMGICv2mState *s = (ARMGICv2mState *)opaque;
+
+ qemu_irq_pulse(s->spi[irq]);
+}
+
+static uint64_t gicv2m_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ ARMGICv2mState *s = (ARMGICv2mState *)opaque;
+ uint32_t val;
+
+ if (size != 4) {
+ qemu_log_mask(LOG_GUEST_ERROR, "gicv2m_read: bad size %u\n", size);
+ return 0;
+ }
+
+ switch (offset) {
+ case V2M_MSI_TYPER:
+ val = (s->base_spi + 32) << 16;
+ val |= s->num_spi;
+ return val;
+ case V2M_MSI_IIDR:
+ /* We don't have any valid implementor so we leave that field as zero
+ * and we return 0 in the arch revision as per the spec.
+ */
+ return (PRODUCT_ID_QEMU << 20);
+ case V2M_IIDR0 ... V2M_IIDR11:
+ /* We do not implement any optional identification registers and the
+ * mandatory MSI_PIDR2 register reads as 0x0, so we capture all
+ * implementation defined registers here.
+ */
+ return 0;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gicv2m_read: Bad offset %x\n", (int)offset);
+ return 0;
+ }
+}
+
+static void gicv2m_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ ARMGICv2mState *s = (ARMGICv2mState *)opaque;
+
+ if (size != 2 && size != 4) {
+ qemu_log_mask(LOG_GUEST_ERROR, "gicv2m_write: bad size %u\n", size);
+ return;
+ }
+
+ switch (offset) {
+ case V2M_MSI_SETSPI_NS: {
+ int spi;
+
+ spi = (value & 0x3ff) - (s->base_spi + 32);
+ if (spi >= 0 && spi < s->num_spi) {
+ gicv2m_set_irq(s, spi);
+ }
+ return;
+ }
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gicv2m_write: Bad offset %x\n", (int)offset);
+ }
+}
+
+static const MemoryRegionOps gicv2m_ops = {
+ .read = gicv2m_read,
+ .write = gicv2m_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void gicv2m_realize(DeviceState *dev, Error **errp)
+{
+ ARMGICv2mState *s = ARM_GICV2M(dev);
+ int i;
+
+ if (s->num_spi > GICV2M_NUM_SPI_MAX) {
+ error_setg(errp,
+ "requested %u SPIs exceeds GICv2m frame maximum %d",
+ s->num_spi, GICV2M_NUM_SPI_MAX);
+ return;
+ }
+
+ if (s->base_spi + 32 > 1020 - s->num_spi) {
+ error_setg(errp,
+ "requested base SPI %u+%u exceeds max. number 1020",
+ s->base_spi + 32, s->num_spi);
+ return;
+ }
+
+ for (i = 0; i < s->num_spi; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->spi[i]);
+ }
+
+ msi_nonbroken = true;
+ kvm_gsi_direct_mapping = true;
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+}
+
+static void gicv2m_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ ARMGICv2mState *s = ARM_GICV2M(obj);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &gicv2m_ops, s,
+ "gicv2m", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static Property gicv2m_properties[] = {
+ DEFINE_PROP_UINT32("base-spi", ARMGICv2mState, base_spi, 0),
+ DEFINE_PROP_UINT32("num-spi", ARMGICv2mState, num_spi, 64),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void gicv2m_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, gicv2m_properties);
+ dc->realize = gicv2m_realize;
+}
+
+static const TypeInfo gicv2m_info = {
+ .name = TYPE_ARM_GICV2M,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(ARMGICv2mState),
+ .instance_init = gicv2m_init,
+ .class_init = gicv2m_class_init,
+};
+
+static void gicv2m_register_types(void)
+{
+ type_register_static(&gicv2m_info);
+}
+
+type_init(gicv2m_register_types)
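
The SETSPI_NS handling above maps an MSI data write onto one of the frame's SPIs, and MSI_TYPER advertises the first SPI number and the frame size. A standalone sketch (not QEMU code) of that arithmetic, using the same defaults as the properties above (base-spi 0, num-spi 64):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of the SETSPI_NS decoding: returns true and the frame-relative
     * SPI index if the written MSI data targets an SPI owned by this frame. */
    static bool v2m_decode(uint32_t base_spi, uint32_t num_spi,
                           uint32_t msi_data, int *spi_index)
    {
        int spi = (msi_data & 0x3ff) - (base_spi + 32);

        if (spi < 0 || spi >= (int)num_spi) {
            return false;
        }
        *spi_index = spi;
        return true;
    }

    int main(void)
    {
        uint32_t base_spi = 0, num_spi = 64;
        uint32_t typer = ((base_spi + 32) << 16) | num_spi;  /* V2M_MSI_TYPER */
        int idx;

        printf("MSI_TYPER = 0x%08x\n", typer);
        if (v2m_decode(base_spi, num_spi, 48, &idx)) {
            printf("MSI data 48 -> frame SPI index %d (INTID %u)\n",
                   idx, base_spi + 32 + idx);
        }
        return 0;
    }
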
diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c
new file mode 100644
index 000000000..9f5f815db
--- /dev/null
+++ b/hw/intc/arm_gicv3.c
@@ -0,0 +1,420 @@
+/*
+ * ARM Generic Interrupt Controller v3
+ *
+ * Copyright (c) 2015 Huawei.
+ * Copyright (c) 2016 Linaro Limited
+ * Written by Shlomo Pongratz, Peter Maydell
+ *
+ * This code is licensed under the GPL, version 2 or (at your option)
+ * any later version.
+ */
+
+/* This file contains implementation code for an interrupt controller
+ * which implements the GICv3 architecture. Specifically this is where
+ * the device class itself and the functions for handling interrupts
+ * coming in and going out live.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/intc/arm_gicv3.h"
+#include "gicv3_internal.h"
+
+static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
+{
+ /* Return true if this IRQ at this priority should take
+ * precedence over the current recorded highest priority
+ * pending interrupt for this CPU. We also return true if
+ * the current recorded highest priority pending interrupt
+ * is the same as this one (a property which the calling code
+ * relies on).
+ */
+ if (prio < cs->hppi.prio) {
+ return true;
+ }
+ /* If multiple pending interrupts have the same priority then it is an
+ * IMPDEF choice which of them to signal to the CPU. We choose to
+ * signal the one with the lowest interrupt number.
+ */
+ if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
+ return true;
+ }
+ return false;
+}
+
+static uint32_t gicd_int_pending(GICv3State *s, int irq)
+{
+ /* Recalculate which distributor interrupts are actually pending
+ * in the group of 32 interrupts starting at irq (which should be a multiple
+ * of 32), and return a 32-bit integer which has a bit set for each
+ * interrupt that is eligible to be signaled to the CPU interface.
+ *
+ * An interrupt is pending if:
+ * + the PENDING latch is set OR it is level triggered and the input is 1
+ * + its ENABLE bit is set
+ * + the GICD enable bit for its group is set
+ * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
+ * Conveniently we can bulk-calculate this with bitwise operations.
+ */
+ uint32_t pend, grpmask;
+ uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
+ uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
+ uint32_t level = *gic_bmp_ptr32(s->level, irq);
+ uint32_t group = *gic_bmp_ptr32(s->group, irq);
+ uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
+ uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
+ uint32_t active = *gic_bmp_ptr32(s->active, irq);
+
+ pend = pending | (~edge_trigger & level);
+ pend &= enable;
+ pend &= ~active;
+
+ if (s->gicd_ctlr & GICD_CTLR_DS) {
+ grpmod = 0;
+ }
+
+ grpmask = 0;
+ if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
+ grpmask |= group;
+ }
+ if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
+ grpmask |= (~group & grpmod);
+ }
+ if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
+ grpmask |= (~group & ~grpmod);
+ }
+ pend &= grpmask;
+
+ return pend;
+}
+
+static uint32_t gicr_int_pending(GICv3CPUState *cs)
+{
+ /* Recalculate which redistributor interrupts are actually pending,
+ * and return a 32-bit integer which has a bit set for each interrupt
+ * that is eligible to be signaled to the CPU interface.
+ *
+ * An interrupt is pending if:
+ * + the PENDING latch is set OR it is level triggered and the input is 1
+ * + its ENABLE bit is set
+ * + the GICD enable bit for its group is set
+ * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
+ * Conveniently we can bulk-calculate this with bitwise operations.
+ */
+ uint32_t pend, grpmask, grpmod;
+
+ pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
+ pend &= cs->gicr_ienabler0;
+ pend &= ~cs->gicr_iactiver0;
+
+ if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
+ grpmod = 0;
+ } else {
+ grpmod = cs->gicr_igrpmodr0;
+ }
+
+ grpmask = 0;
+ if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
+ grpmask |= cs->gicr_igroupr0;
+ }
+ if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
+ grpmask |= (~cs->gicr_igroupr0 & grpmod);
+ }
+ if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
+ grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
+ }
+ pend &= grpmask;
+
+ return pend;
+}
+
+/* Update the interrupt status after state in a redistributor
+ * or CPU interface has changed, but don't tell the CPU i/f.
+ */
+static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
+{
+ /* Find the highest priority pending interrupt among the
+ * redistributor interrupts (SGIs and PPIs).
+ */
+ bool seenbetter = false;
+ uint8_t prio;
+ int i;
+ uint32_t pend;
+
+ /* Find out which redistributor interrupts are eligible to be
+ * signaled to the CPU interface.
+ */
+ pend = gicr_int_pending(cs);
+
+ if (pend) {
+ for (i = 0; i < GIC_INTERNAL; i++) {
+ if (!(pend & (1 << i))) {
+ continue;
+ }
+ prio = cs->gicr_ipriorityr[i];
+ if (irqbetter(cs, i, prio)) {
+ cs->hppi.irq = i;
+ cs->hppi.prio = prio;
+ seenbetter = true;
+ }
+ }
+ }
+
+ if (seenbetter) {
+ cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
+ }
+
+ if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
+ (cs->hpplpi.prio != 0xff)) {
+ if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
+ cs->hppi.irq = cs->hpplpi.irq;
+ cs->hppi.prio = cs->hpplpi.prio;
+ cs->hppi.grp = cs->hpplpi.grp;
+ seenbetter = true;
+ }
+ }
+
+ /* If the best interrupt we just found would preempt whatever
+ * was the previous best interrupt before this update, then
+ * we know it's definitely the best one now.
+ * If we didn't find an interrupt that would preempt the previous
+ * best, and the previous best is outside our range (or there was no
+ * previous pending interrupt at all), then that is still valid, and
+ * we leave it as the best.
+ * Otherwise, we need to do a full update (because the previous best
+ * interrupt has reduced in priority and any other interrupt could
+ * now be the new best one).
+ */
+ if (!seenbetter && cs->hppi.prio != 0xff &&
+ (cs->hppi.irq < GIC_INTERNAL ||
+ cs->hppi.irq >= GICV3_LPI_INTID_START)) {
+ gicv3_full_update_noirqset(cs->gic);
+ }
+}
+
+/* Update the GIC status after state in a redistributor or
+ * CPU interface has changed, and inform the CPU i/f of
+ * its new highest priority pending interrupt.
+ */
+void gicv3_redist_update(GICv3CPUState *cs)
+{
+ gicv3_redist_update_noirqset(cs);
+ gicv3_cpuif_update(cs);
+}
+
+/* Update the GIC status after state in the distributor has
+ * changed affecting @len interrupts starting at @start,
+ * but don't tell the CPU i/f.
+ */
+static void gicv3_update_noirqset(GICv3State *s, int start, int len)
+{
+ int i;
+ uint8_t prio;
+ uint32_t pend = 0;
+
+ assert(start >= GIC_INTERNAL);
+ assert(len > 0);
+
+ for (i = 0; i < s->num_cpu; i++) {
+ s->cpu[i].seenbetter = false;
+ }
+
+ /* Find the highest priority pending interrupt in this range. */
+ for (i = start; i < start + len; i++) {
+ GICv3CPUState *cs;
+
+ if (i == start || (i & 0x1f) == 0) {
+ /* Calculate the next 32 bits worth of pending status */
+ pend = gicd_int_pending(s, i & ~0x1f);
+ }
+
+ if (!(pend & (1 << (i & 0x1f)))) {
+ continue;
+ }
+ cs = s->gicd_irouter_target[i];
+ if (!cs) {
+ /* Interrupts targeting no implemented CPU should remain pending
+ * and not be forwarded to any CPU.
+ */
+ continue;
+ }
+ prio = s->gicd_ipriority[i];
+ if (irqbetter(cs, i, prio)) {
+ cs->hppi.irq = i;
+ cs->hppi.prio = prio;
+ cs->seenbetter = true;
+ }
+ }
+
+ /* If the best interrupt we just found would preempt whatever
+ * was the previous best interrupt before this update, then
+ * we know it's definitely the best one now.
+ * If we didn't find an interrupt that would preempt the previous
+ * best, and the previous best is outside our range (or there was
+ * no previous pending interrupt at all), then that
+ * is still valid, and we leave it as the best.
+ * Otherwise, we need to do a full update (because the previous best
+ * interrupt has reduced in priority and any other interrupt could
+ * now be the new best one).
+ */
+ for (i = 0; i < s->num_cpu; i++) {
+ GICv3CPUState *cs = &s->cpu[i];
+
+ if (cs->seenbetter) {
+ cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
+ }
+
+ if (!cs->seenbetter && cs->hppi.prio != 0xff &&
+ cs->hppi.irq >= start && cs->hppi.irq < start + len) {
+ gicv3_full_update_noirqset(s);
+ break;
+ }
+ }
+}
+
+void gicv3_update(GICv3State *s, int start, int len)
+{
+ int i;
+
+ gicv3_update_noirqset(s, start, len);
+ for (i = 0; i < s->num_cpu; i++) {
+ gicv3_cpuif_update(&s->cpu[i]);
+ }
+}
+
+void gicv3_full_update_noirqset(GICv3State *s)
+{
+ /* Completely recalculate the GIC status from scratch, but
+ * don't update any outbound IRQ lines.
+ */
+ int i;
+
+ for (i = 0; i < s->num_cpu; i++) {
+ s->cpu[i].hppi.prio = 0xff;
+ }
+
+ /* Note that we can guarantee that these functions will not
+ * recursively call back into gicv3_full_update(), because
+ * at each point the "previous best" is always outside the
+ * range we ask them to update.
+ */
+ gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);
+
+ for (i = 0; i < s->num_cpu; i++) {
+ gicv3_redist_update_noirqset(&s->cpu[i]);
+ }
+}
+
+void gicv3_full_update(GICv3State *s)
+{
+ /* Completely recalculate the GIC status from scratch, including
+ * updating outbound IRQ lines.
+ */
+ int i;
+
+ gicv3_full_update_noirqset(s);
+ for (i = 0; i < s->num_cpu; i++) {
+ gicv3_cpuif_update(&s->cpu[i]);
+ }
+}
+
+/* Process a change in an external IRQ input. */
+static void gicv3_set_irq(void *opaque, int irq, int level)
+{
+ /* Meaning of the 'irq' parameter:
+ * [0..N-1] : external interrupts
+ * [N..N+31] : PPI (internal) interrupts for CPU 0
+ * [N+32..N+63] : PPI (internal) interrupts for CPU 1
+ * ...
+ */
+ GICv3State *s = opaque;
+
+ if (irq < (s->num_irq - GIC_INTERNAL)) {
+ /* external interrupt (SPI) */
+ gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
+ } else {
+ /* per-cpu interrupt (PPI) */
+ int cpu;
+
+ irq -= (s->num_irq - GIC_INTERNAL);
+ cpu = irq / GIC_INTERNAL;
+ irq %= GIC_INTERNAL;
+ assert(cpu < s->num_cpu);
+ /* Raising SGIs via this function would be a bug in how the board
+ * model wires up interrupts.
+ */
+ assert(irq >= GIC_NR_SGIS);
+ gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
+ }
+}
+
+static void arm_gicv3_post_load(GICv3State *s)
+{
+ int i;
+ /* Recalculate our cached idea of the current highest priority
+ * pending interrupt, but don't set IRQ or FIQ lines.
+ */
+ for (i = 0; i < s->num_cpu; i++) {
+ gicv3_redist_update_lpi_only(&s->cpu[i]);
+ }
+ gicv3_full_update_noirqset(s);
+ /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
+ gicv3_cache_all_target_cpustates(s);
+}
+
+static const MemoryRegionOps gic_ops[] = {
+ {
+ .read_with_attrs = gicv3_dist_read,
+ .write_with_attrs = gicv3_dist_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ },
+ {
+ .read_with_attrs = gicv3_redist_read,
+ .write_with_attrs = gicv3_redist_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ }
+};
+
+static void arm_gic_realize(DeviceState *dev, Error **errp)
+{
+ /* Device instance realize function for the GIC sysbus device */
+ GICv3State *s = ARM_GICV3(dev);
+ ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
+ Error *local_err = NULL;
+
+ agc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);
+
+ gicv3_init_cpuif(s);
+}
+
+static void arm_gicv3_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
+ ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);
+
+ agcc->post_load = arm_gicv3_post_load;
+ device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
+}
+
+static const TypeInfo arm_gicv3_info = {
+ .name = TYPE_ARM_GICV3,
+ .parent = TYPE_ARM_GICV3_COMMON,
+ .instance_size = sizeof(GICv3State),
+ .class_init = arm_gicv3_class_init,
+ .class_size = sizeof(ARMGICv3Class),
+};
+
+static void arm_gicv3_register_types(void)
+{
+ type_register_static(&arm_gicv3_info);
+}
+
+type_init(arm_gicv3_register_types)
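
gicd_int_pending() and gicr_int_pending() above compute pending eligibility for a whole block of 32 interrupts with bitwise operations. A standalone sketch (not QEMU code) of the same calculation on one 32-bit block:

    #include <stdint.h>
    #include <stdio.h>

    /* Each uint32_t holds one bit per interrupt for a block of 32 interrupts. */
    static uint32_t block_pending(uint32_t pending, uint32_t edge_trigger,
                                  uint32_t level, uint32_t enable,
                                  uint32_t active, uint32_t grpmask)
    {
        uint32_t pend;

        /* Latched pending, or level-triggered with the input line high */
        pend = pending | (~edge_trigger & level);
        pend &= enable;          /* must be enabled */
        pend &= ~active;         /* Active (not Active+Pending) is not signalled */
        pend &= grpmask;         /* group must be enabled in GICD_CTLR */
        return pend;
    }

    int main(void)
    {
        /* IRQ 3 latched pending; IRQ 5 level-triggered and asserted; both
         * enabled; IRQ 5 already active; all groups enabled. */
        uint32_t result = block_pending(1u << 3,            /* pending */
                                        ~(1u << 5),         /* 5 is level-triggered */
                                        1u << 5,            /* level input */
                                        (1u << 3) | (1u << 5),
                                        1u << 5,            /* 5 active */
                                        0xffffffffu);
        printf("eligible pending bits: 0x%08x\n", result);  /* 0x00000008 */
        return 0;
    }
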
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
new file mode 100644
index 000000000..9884d2e39
--- /dev/null
+++ b/hw/intc/arm_gicv3_common.c
@@ -0,0 +1,561 @@
+/*
+ * ARM GICv3 support - common bits of emulated and KVM kernel model
+ *
+ * Copyright (c) 2012 Linaro Limited
+ * Copyright (c) 2015 Huawei.
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Written by Peter Maydell
+ * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/core/cpu.h"
+#include "hw/intc/arm_gicv3_common.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "gicv3_internal.h"
+#include "hw/arm/linux-boot-if.h"
+#include "sysemu/kvm.h"
+
+
+static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
+{
+ if (cs->gicd_no_migration_shift_bug) {
+ return;
+ }
+
+ /* Older versions of QEMU had a bug in the handling of state save/restore
+ * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
+ * so that instead of the data for external interrupts 32 and up
+ * starting at bit position 32 in the bitmap, it started at bit
+ * position 64. If we're receiving data from a QEMU with that bug,
+ * we must move the data down into the right place.
+ */
+ memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
+ sizeof(cs->group) - GIC_INTERNAL / 8);
+ memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
+ sizeof(cs->grpmod) - GIC_INTERNAL / 8);
+ memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
+ sizeof(cs->enabled) - GIC_INTERNAL / 8);
+ memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
+ sizeof(cs->pending) - GIC_INTERNAL / 8);
+ memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
+ sizeof(cs->active) - GIC_INTERNAL / 8);
+ memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
+ sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);
+
+ /*
+ * This version of QEMU does not have the bug, so set the flag to true to
+ * indicate that; this is necessary for a subsequent migration from this
+ * QEMU version to work.
+ */
+ cs->gicd_no_migration_shift_bug = true;
+}
+
+static int gicv3_pre_save(void *opaque)
+{
+ GICv3State *s = (GICv3State *)opaque;
+ ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);
+
+ if (c->pre_save) {
+ c->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int gicv3_post_load(void *opaque, int version_id)
+{
+ GICv3State *s = (GICv3State *)opaque;
+ ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);
+
+ gicv3_gicd_no_migration_shift_bug_post_load(s);
+
+ if (c->post_load) {
+ c->post_load(s);
+ }
+ return 0;
+}
+
+static bool virt_state_needed(void *opaque)
+{
+ GICv3CPUState *cs = opaque;
+
+ return cs->num_list_regs != 0;
+}
+
+static const VMStateDescription vmstate_gicv3_cpu_virt = {
+ .name = "arm_gicv3_cpu/virt",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = virt_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
+ VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
+ VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
+ VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int vmstate_gicv3_cpu_pre_load(void *opaque)
+{
+ GICv3CPUState *cs = opaque;
+
+ /*
+ * If the sre_el1 subsection is not transferred this
+ * means SRE_EL1 is 0x7 (which might not be the same as
+ * our reset value).
+ */
+ cs->icc_sre_el1 = 0x7;
+ return 0;
+}
+
+static bool icc_sre_el1_reg_needed(void *opaque)
+{
+ GICv3CPUState *cs = opaque;
+
+ return cs->icc_sre_el1 != 7;
+}
+
+const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
+ .name = "arm_gicv3_cpu/sre_el1",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = icc_sre_el1_reg_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_gicv3_cpu = {
+ .name = "arm_gicv3_cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_load = vmstate_gicv3_cpu_pre_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(level, GICv3CPUState),
+ VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
+ VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
+ VMSTATE_UINT32(gicr_waker, GICv3CPUState),
+ VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
+ VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
+ VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
+ VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
+ VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
+ VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
+ VMSTATE_UINT32(edge_trigger, GICv3CPUState),
+ VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
+ VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
+ VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
+ VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
+ VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
+ VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
+ VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
+ VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
+ VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_gicv3_cpu_virt,
+ &vmstate_gicv3_cpu_sre_el1,
+ NULL
+ }
+};
+
+static int gicv3_pre_load(void *opaque)
+{
+ GICv3State *cs = opaque;
+
+ /*
+ * The gicd_no_migration_shift_bug flag is used for migration compatibility
+ * with older QEMU versions which may have the GICD bitmap shift bug under KVM.
+ * Strictly, what we want to know is whether the migration source is using
+ * KVM. Since we don't have any way to determine that, we look at whether the
+ * destination is using KVM; this is close enough because for the older QEMU
+ * versions with this bug KVM -> TCG migration didn't work anyway. If the
+ * source is a newer QEMU without this bug it will transmit the migration
+ * subsection which sets the flag to true; otherwise it will remain set to
+ * the value we select here.
+ */
+ if (kvm_enabled()) {
+ cs->gicd_no_migration_shift_bug = false;
+ }
+
+ return 0;
+}
+
+static bool needed_always(void *opaque)
+{
+ return true;
+}
+
+const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
+ .name = "arm_gicv3/gicd_no_migration_shift_bug",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = needed_always,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_gicv3 = {
+ .name = "arm_gicv3",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_load = gicv3_pre_load,
+ .pre_save = gicv3_pre_save,
+ .post_load = gicv3_post_load,
+ .priority = MIG_PRI_GICV3,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(gicd_ctlr, GICv3State),
+ VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
+ VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
+ VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
+ VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
+ VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
+ VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
+ VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
+ VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
+ VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
+ VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
+ VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
+ DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
+ vmstate_gicv3_cpu, GICv3CPUState),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_gicv3_gicd_no_migration_shift_bug,
+ NULL
+ }
+};
+
+void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
+ const MemoryRegionOps *ops)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(s);
+ int i;
+ int cpuidx;
+
+ /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
+ * GPIO array layout is thus:
+ * [0..N-1] SPIs
+ * [N..N+31] PPIs for CPU 0
+ * [N+32..N+63] PPIs for CPU 1
+ * ...
+ */
+ i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
+ qdev_init_gpio_in(DEVICE(s), handler, i);
+
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
+ }
+
+ memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
+ "gicv3_dist", 0x10000);
+ sysbus_init_mmio(sbd, &s->iomem_dist);
+
+ s->redist_regions = g_new0(GICv3RedistRegion, s->nb_redist_regions);
+ cpuidx = 0;
+ for (i = 0; i < s->nb_redist_regions; i++) {
+ char *name = g_strdup_printf("gicv3_redist_region[%d]", i);
+ GICv3RedistRegion *region = &s->redist_regions[i];
+
+ region->gic = s;
+ region->cpuidx = cpuidx;
+ cpuidx += s->redist_region_count[i];
+
+ memory_region_init_io(&region->iomem, OBJECT(s),
+ ops ? &ops[1] : NULL, region, name,
+ s->redist_region_count[i] * GICV3_REDIST_SIZE);
+ sysbus_init_mmio(sbd, &region->iomem);
+ g_free(name);
+ }
+}
+
+static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
+{
+ GICv3State *s = ARM_GICV3_COMMON(dev);
+ int i, rdist_capacity, cpuidx;
+
+ /* revision property is actually reserved and currently used only in order
+ * to keep the interface compatible with GICv2 code, avoiding extra
+ * conditions. However, in future it could be used, for example, if we
+ * implement GICv4.
+ */
+ if (s->revision != 3) {
+ error_setg(errp, "unsupported GIC revision %d", s->revision);
+ return;
+ }
+
+ if (s->num_irq > GICV3_MAXIRQ) {
+ error_setg(errp,
+ "requested %u interrupt lines exceeds GIC maximum %d",
+ s->num_irq, GICV3_MAXIRQ);
+ return;
+ }
+ if (s->num_irq < GIC_INTERNAL) {
+ error_setg(errp,
+ "requested %u interrupt lines is below GIC minimum %d",
+ s->num_irq, GIC_INTERNAL);
+ return;
+ }
+
+ /* ITLinesNumber is represented as (N / 32) - 1, so this is an
+ * implementation imposed restriction, not an architectural one,
+ * so we don't have to deal with bitfields where only some of the
+ * bits in a 32-bit word should be valid.
+ */
+ if (s->num_irq % 32) {
+ error_setg(errp,
+ "%d interrupt lines unsupported: not divisible by 32",
+ s->num_irq);
+ return;
+ }
+
+ if (s->lpi_enable && !s->dma) {
+ error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
+ return;
+ }
+
+ rdist_capacity = 0;
+ for (i = 0; i < s->nb_redist_regions; i++) {
+ rdist_capacity += s->redist_region_count[i];
+ }
+ if (rdist_capacity < s->num_cpu) {
+ error_setg(errp, "Capacity of the redist regions (%d) "
+ "is less than number of vcpus (%d)",
+ rdist_capacity, s->num_cpu);
+ return;
+ }
+
+ s->cpu = g_new0(GICv3CPUState, s->num_cpu);
+
+ for (i = 0; i < s->num_cpu; i++) {
+ CPUState *cpu = qemu_get_cpu(i);
+ uint64_t cpu_affid;
+
+ s->cpu[i].cpu = cpu;
+ s->cpu[i].gic = s;
+ /* Store GICv3CPUState in CPUARMState gicv3state pointer */
+ gicv3_set_gicv3state(cpu, &s->cpu[i]);
+
+ /* Pre-construct the GICR_TYPER:
+ * For our implementation:
+ * Top 32 bits are the affinity value of the associated CPU
+ * CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
+ * Processor_Number == CPU index starting from 0
+ * DPGS == 0 (GICR_CTLR.DPG* not supported)
+ * Last == 1 if this is the last redistributor in a series of
+ * contiguous redistributor pages
+ * DirectLPI == 0 (direct injection of LPIs not supported)
+ * VLPIS == 0 (virtual LPIs not supported)
+ * PLPIS == 0 (physical LPIs not supported)
+ */
+ cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);
+
+ /* The CPU mp-affinity property is in MPIDR register format; squash
+ * the affinity bytes into 32 bits as the GICR_TYPER has them.
+ */
+ cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
+ (cpu_affid & 0xFFFFFF);
+ s->cpu[i].gicr_typer = (cpu_affid << 32) |
+ (1 << 24) |
+ (i << 8);
+
+ if (s->lpi_enable) {
+ s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
+ }
+ }
+
+ /*
+ * Now go through and set GICR_TYPER.Last for the final
+ * redistributor in each region.
+ */
+ cpuidx = 0;
+ for (i = 0; i < s->nb_redist_regions; i++) {
+ cpuidx += s->redist_region_count[i];
+ s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST;
+ }
+}
+
+static void arm_gicv3_finalize(Object *obj)
+{
+ GICv3State *s = ARM_GICV3_COMMON(obj);
+
+ g_free(s->redist_region_count);
+}
+
+static void arm_gicv3_common_reset(DeviceState *dev)
+{
+ GICv3State *s = ARM_GICV3_COMMON(dev);
+ int i;
+
+ for (i = 0; i < s->num_cpu; i++) {
+ GICv3CPUState *cs = &s->cpu[i];
+
+ cs->level = 0;
+ cs->gicr_ctlr = 0;
+ cs->gicr_statusr[GICV3_S] = 0;
+ cs->gicr_statusr[GICV3_NS] = 0;
+ cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
+ cs->gicr_propbaser = 0;
+ cs->gicr_pendbaser = 0;
+ /* If we're resetting a TZ-aware GIC as if secure firmware
+ * had set it up ready to start a kernel in non-secure, we
+ * need to set interrupts to group 1 so the kernel can use them.
+ * Otherwise they reset to group 0 like the hardware.
+ */
+ if (s->irq_reset_nonsecure) {
+ cs->gicr_igroupr0 = 0xffffffff;
+ } else {
+ cs->gicr_igroupr0 = 0;
+ }
+
+ cs->gicr_ienabler0 = 0;
+ cs->gicr_ipendr0 = 0;
+ cs->gicr_iactiver0 = 0;
+ cs->edge_trigger = 0xffff;
+ cs->gicr_igrpmodr0 = 0;
+ cs->gicr_nsacr = 0;
+ memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));
+
+ cs->hppi.prio = 0xff;
+ cs->hpplpi.prio = 0xff;
+
+ /* State in the CPU interface must *not* be reset here, because it
+ * is part of the CPU's reset domain, not the GIC device's.
+ */
+ }
+
+ /* For our implementation affinity routing is always enabled */
+ if (s->security_extn) {
+ s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
+ } else {
+ s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
+ }
+
+ s->gicd_statusr[GICV3_S] = 0;
+ s->gicd_statusr[GICV3_NS] = 0;
+
+ memset(s->group, 0, sizeof(s->group));
+ memset(s->grpmod, 0, sizeof(s->grpmod));
+ memset(s->enabled, 0, sizeof(s->enabled));
+ memset(s->pending, 0, sizeof(s->pending));
+ memset(s->active, 0, sizeof(s->active));
+ memset(s->level, 0, sizeof(s->level));
+ memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
+ memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
+ memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
+ memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
+ /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
+ * write these to get sane behaviour and we need not populate the
+ * pointer cache here; however having the cache be different for
+ * "happened to be 0 from reset" and "guest wrote 0" would be
+ * too confusing.
+ */
+ gicv3_cache_all_target_cpustates(s);
+
+ if (s->irq_reset_nonsecure) {
+ /* If we're resetting a TZ-aware GIC as if secure firmware
+ * had set it up ready to start a kernel in non-secure, we
+ * need to set interrupts to group 1 so the kernel can use them.
+ * Otherwise they reset to group 0 like the hardware.
+ */
+ for (i = GIC_INTERNAL; i < s->num_irq; i++) {
+ gicv3_gicd_group_set(s, i);
+ }
+ }
+ s->gicd_no_migration_shift_bug = true;
+}
+
+static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
+ bool secure_boot)
+{
+ GICv3State *s = ARM_GICV3_COMMON(obj);
+
+ if (s->security_extn && !secure_boot) {
+ /* We're directly booting a kernel into NonSecure. If this GIC
+ * implements the security extensions then we must configure it
+ * to have all the interrupts be NonSecure (this is a job that
+ * is done by the Secure boot firmware in real hardware, and in
+ * this mode QEMU is acting as a minimalist firmware-and-bootloader
+ * equivalent).
+ */
+ s->irq_reset_nonsecure = true;
+ }
+}
+
+static Property arm_gicv3_common_properties[] = {
+ DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
+ DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
+ DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
+ DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
+ DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
+ DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
+ redist_region_count, qdev_prop_uint32, uint32_t),
+ DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
+ MemoryRegion *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);
+
+ dc->reset = arm_gicv3_common_reset;
+ dc->realize = arm_gicv3_common_realize;
+ device_class_set_props(dc, arm_gicv3_common_properties);
+ dc->vmsd = &vmstate_gicv3;
+ albifc->arm_linux_init = arm_gic_common_linux_init;
+}
+
+static const TypeInfo arm_gicv3_common_type = {
+ .name = TYPE_ARM_GICV3_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(GICv3State),
+ .class_size = sizeof(ARMGICv3CommonClass),
+ .class_init = arm_gicv3_common_class_init,
+ .instance_finalize = arm_gicv3_finalize,
+ .abstract = true,
+ .interfaces = (InterfaceInfo []) {
+ { TYPE_ARM_LINUX_BOOT_IF },
+ { },
+ },
+};
+
+static void register_types(void)
+{
+ type_register_static(&arm_gicv3_common_type);
+}
+
+type_init(register_types)
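
arm_gicv3_common_realize() above pre-computes GICR_TYPER for each CPU by squashing the MPIDR-format affinity into 32 bits and adding the CommonLPIAff and Processor_Number fields. A standalone sketch (not QEMU code) of that packing:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Squash the MPIDR-format affinity (Aff3 in bits [39:32], Aff2..Aff0 in
     * bits [23:0]) into the top 32 bits of GICR_TYPER, then set
     * CommonLPIAff == 01 and the processor number. */
    static uint64_t make_gicr_typer(uint64_t mpidr_affinity, int cpu_index)
    {
        uint32_t affid = ((mpidr_affinity & 0xFF00000000ULL) >> 8) |
                         (mpidr_affinity & 0xFFFFFF);

        return ((uint64_t)affid << 32) |
               (1 << 24) |              /* CommonLPIAff == 01 */
               (cpu_index << 8);        /* Processor_Number */
    }

    int main(void)
    {
        /* e.g. Aff3 = 1, Aff1 = 2, Aff0 = 3, CPU index 5 */
        uint64_t typer = make_gicr_typer(0x0100000203ULL, 5);

        printf("GICR_TYPER = 0x%016" PRIx64 "\n", typer);
        return 0;
    }

The PLPIS and Last bits are then OR'd in per CPU as the realize function above does, depending on the "has-lpi" property and the redistributor region layout.
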
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
new file mode 100644
index 000000000..85fc369e5
--- /dev/null
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -0,0 +1,2700 @@
+/*
+ * ARM Generic Interrupt Controller v3
+ *
+ * Copyright (c) 2016 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This code is licensed under the GPL, version 2 or (at your option)
+ * any later version.
+ */
+
+/* This file contains the code for the system register interface
+ * portions of the GICv3.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "trace.h"
+#include "gicv3_internal.h"
+#include "hw/irq.h"
+#include "cpu.h"
+
+void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+
+ env->gicv3state = (void *)s;
+};
+
+static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
+{
+ return env->gicv3state;
+}
+
+static bool gicv3_use_ns_bank(CPUARMState *env)
+{
+ /* Return true if we should use the NonSecure bank for a banked GIC
+ * CPU interface register. Note that this differs from the
+ * access_secure_reg() function because GICv3 banked registers are
+ * banked even for AArch64, unlike the other CPU system registers.
+ */
+ return !arm_is_secure_below_el3(env);
+}
+
+/* The minimum BPR for the virtual interface is a configurable property */
+static inline int icv_min_vbpr(GICv3CPUState *cs)
+{
+ return 7 - cs->vprebits;
+}
+
+/* Simple accessor functions for LR fields */
+static uint32_t ich_lr_vintid(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
+}
+
+static uint32_t ich_lr_pintid(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
+}
+
+static uint32_t ich_lr_prio(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
+}
+
+static int ich_lr_state(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
+}
+
+static bool icv_access(CPUARMState *env, int hcr_flags)
+{
+ /* Return true if this ICC_ register access should really be
+ * directed to an ICV_ access. hcr_flags is a mask of
+ * HCR_EL2 bits to check: we treat this as an ICV_ access
+ * if we are in NS EL1 and at least one of the specified
+ * HCR_EL2 bits is set.
+ *
+ * ICV registers fall into four categories:
+ * * access if NS EL1 and HCR_EL2.FMO == 1:
+ * all ICV regs with '0' in their name
+ * * access if NS EL1 and HCR_EL2.IMO == 1:
+ * all ICV regs with '1' in their name
+ * * access if NS EL1 and either IMO or FMO == 1:
+ * CTLR, DIR, PMR, RPR
+ */
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+ bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
+
+ return flagmatch && arm_current_el(env) == 1
+ && !arm_is_secure_below_el3(env);
+}
+
+static int read_vbpr(GICv3CPUState *cs, int grp)
+{
+ /* Read VBPR value out of the VMCR field (caller must handle
+ * VCBPR effects if required)
+ */
+ if (grp == GICV3_G0) {
+ return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
+ ICH_VMCR_EL2_VBPR0_LENGTH);
+ } else {
+ return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
+ ICH_VMCR_EL2_VBPR1_LENGTH);
+ }
+}
+
+static void write_vbpr(GICv3CPUState *cs, int grp, int value)
+{
+ /* Write the new VBPR value for the given group, handling the "writing a
+ * value less than the minimum sets it to the minimum" semantics.
+ */
+ int min = icv_min_vbpr(cs);
+
+ if (grp != GICV3_G0) {
+ min++;
+ }
+
+ value = MAX(value, min);
+
+ if (grp == GICV3_G0) {
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
+ ICH_VMCR_EL2_VBPR0_LENGTH, value);
+ } else {
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
+ ICH_VMCR_EL2_VBPR1_LENGTH, value);
+ }
+}
+
+static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
+{
+ /* Return a mask word which clears the unimplemented priority bits
+ * from a priority value for a virtual interrupt. (Not to be confused
+ * with the group priority, whose mask depends on the value of VBPR
+ * for the interrupt group.)
+ */
+ return ~0U << (8 - cs->vpribits);
+}
+
+static int ich_highest_active_virt_prio(GICv3CPUState *cs)
+{
+ /* Calculate the current running priority based on the set bits
+ * in the ICH Active Priority Registers.
+ */
+ int i;
+ int aprmax = 1 << (cs->vprebits - 5);
+
+ assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+
+ for (i = 0; i < aprmax; i++) {
+ uint32_t apr = cs->ich_apr[GICV3_G0][i] |
+ cs->ich_apr[GICV3_G1NS][i];
+
+ if (!apr) {
+ continue;
+ }
+ return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
+ }
+ /* No current active interrupts: return idle priority */
+ return 0xff;
+}
+
+static int hppvi_index(GICv3CPUState *cs)
+{
+ /* Return the list register index of the highest priority pending
+ * virtual interrupt, as per the HighestPriorityVirtualInterrupt
+ * pseudocode. If no pending virtual interrupts, return -1.
+ */
+ int idx = -1;
+ int i;
+ /* Note that a list register entry with a priority of 0xff will
+ * never be reported by this function; this is the architecturally
+ * correct behaviour.
+ */
+ int prio = 0xff;
+
+ if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
+ /* Both groups disabled, definitely nothing to do */
+ return idx;
+ }
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+ int thisprio;
+
+ if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
+ /* Not Pending */
+ continue;
+ }
+
+ /* Ignore interrupts if relevant group enable not set */
+ if (lr & ICH_LR_EL2_GROUP) {
+ if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ continue;
+ }
+ } else {
+ if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ continue;
+ }
+ }
+
+ thisprio = ich_lr_prio(lr);
+
+ if (thisprio < prio) {
+ prio = thisprio;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
+{
+ /* Return a mask word which clears the subpriority bits from
+ * a priority value for a virtual interrupt in the specified group.
+ * This depends on the VBPR value.
+ * If using VBPR0 then:
+ * a BPR of 0 means the group priority bits are [7:1];
+ * a BPR of 1 means they are [7:2], and so on down to
+ * a BPR of 7 meaning no group priority bits at all.
+ * If using VBPR1 then:
+ * a BPR of 0 is impossible (the minimum value is 1)
+ * a BPR of 1 means the group priority bits are [7:1];
+ * a BPR of 2 means they are [7:2], and so on down to
+ * a BPR of 7 meaning the group priority is [7].
+ *
+ * Which BPR to use depends on the group of the interrupt and
+ * the current ICH_VMCR_EL2.VCBPR settings.
+ *
+ * This corresponds to the VGroupBits() pseudocode.
+ */
+ int bpr;
+
+ if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
+ group = GICV3_G0;
+ }
+
+ bpr = read_vbpr(cs, group);
+ if (group == GICV3_G1NS) {
+ assert(bpr > 0);
+ bpr--;
+ }
+
+ return ~0U << (bpr + 1);
+}
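+
+ /*
+ * For example, if the interrupt is Group 0 and ICH_VMCR_EL2.VBPR0
+ * is 2, the mask is ~0U << 3 == 0xf8: bits [7:3] form the group
+ * priority and bits [2:0] the subpriority. A Group 1 interrupt with
+ * VCBPR clear and VBPR1 == 3 ends up with the same 0xf8 mask.
+ */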
+
+static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
+{
+ /* Return true if we can signal this virtual interrupt defined by
+ * the given list register value; see the pseudocode functions
+ * CanSignalVirtualInterrupt and CanSignalVirtualInt.
+ * Compare also icc_hppi_can_preempt() which is the non-virtual
+ * equivalent of these checks.
+ */
+ int grp;
+ uint32_t mask, prio, rprio, vpmr;
+
+ if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
+ /* Virtual interface disabled */
+ return false;
+ }
+
+ /* We don't need to check that this LR is in Pending state because
+ * that has already been done in hppvi_index().
+ */
+
+ prio = ich_lr_prio(lr);
+ vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH);
+
+ if (prio >= vpmr) {
+ /* Priority mask masks this interrupt */
+ return false;
+ }
+
+ rprio = ich_highest_active_virt_prio(cs);
+ if (rprio == 0xff) {
+ /* No running interrupt so we can preempt */
+ return true;
+ }
+
+ grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ mask = icv_gprio_mask(cs, grp);
+
+ /* We only preempt a running interrupt if the pending interrupt's
+ * group priority is sufficient (the subpriorities are not considered).
+ */
+ if ((prio & mask) < (rprio & mask)) {
+ return true;
+ }
+
+ return false;
+}
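+
+ /*
+ * Example of the final group-priority check: with a 0xf8 mask, a
+ * pending LR priority of 0x28 preempts a running priority of 0x30
+ * (0x28 < 0x30), but a pending priority of 0x35 does not preempt a
+ * running 0x30, because both masked values are 0x30.
+ */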
+
+static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
+ uint32_t *misr)
+{
+ /* Return a set of bits indicating the EOI maintenance interrupt status
+ * for each list register. The EOI maintenance interrupt status is
+ * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
+ * (see the GICv3 spec for the ICH_EISR_EL2 register).
+ * If misr is not NULL then we should also collect the information
+ * about the MISR.EOI, MISR.NP and MISR.U bits.
+ */
+ uint32_t value = 0;
+ int validcount = 0;
+ bool seenpending = false;
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
+ == ICH_LR_EL2_EOI) {
+ value |= (1 << i);
+ }
+ if ((lr & ICH_LR_EL2_STATE_MASK)) {
+ validcount++;
+ }
+ if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
+ seenpending = true;
+ }
+ }
+
+ if (misr) {
+ if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
+ *misr |= ICH_MISR_EL2_U;
+ }
+ if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
+ *misr |= ICH_MISR_EL2_NP;
+ }
+ if (value) {
+ *misr |= ICH_MISR_EL2_EOI;
+ }
+ }
+ return value;
+}
+
+static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
+{
+ /* Return a set of bits indicating the maintenance interrupt status
+ * (as seen in the ICH_MISR_EL2 register).
+ */
+ uint32_t value = 0;
+
+ /* Scan list registers and fill in the U, NP and EOI bits */
+ eoi_maintenance_interrupt_state(cs, &value);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
+ (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
+ value |= ICH_MISR_EL2_LRENP;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
+ (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ value |= ICH_MISR_EL2_VGRP0E;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
+ !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ value |= ICH_MISR_EL2_VGRP0D;
+ }
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
+ (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ value |= ICH_MISR_EL2_VGRP1E;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
+ !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ value |= ICH_MISR_EL2_VGRP1D;
+ }
+
+ return value;
+}
+
+static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
+{
+ /* Tell the CPU about any pending virtual interrupts or
+ * maintenance interrupts, following a change to the state
+ * of the CPU interface relevant to virtual interrupts.
+ *
+ * CAUTION: this function will call qemu_set_irq() on the
+ * CPU maintenance IRQ line, which is typically wired up
+ * to the GIC as a per-CPU interrupt. This means that it
+ * will recursively call back into the GIC code via
+ * gicv3_redist_set_irq() and thus into the CPU interface code's
+ * gicv3_cpuif_update(). It is therefore important that this
+ * function is only called as the final action of a CPU interface
+ * register write implementation, after all the GIC state
+ * fields have been updated. gicv3_cpuif_update() also must
+ * not cause this function to be called, but that happens
+ * naturally as a result of there being no architectural
+ * linkage between the physical and virtual GIC logic.
+ */
+ int idx;
+ int irqlevel = 0;
+ int fiqlevel = 0;
+ int maintlevel = 0;
+ ARMCPU *cpu = ARM_CPU(cs->cpu);
+
+ idx = hppvi_index(cs);
+ trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+
+ if (icv_hppi_can_preempt(cs, lr)) {
+ /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
+ if (lr & ICH_LR_EL2_GROUP) {
+ irqlevel = 1;
+ } else {
+ fiqlevel = 1;
+ }
+ }
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
+ maintenance_interrupt_state(cs) != 0) {
+ maintlevel = 1;
+ }
+
+ trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
+ irqlevel, maintlevel);
+
+ qemu_set_irq(cs->parent_vfiq, fiqlevel);
+ qemu_set_irq(cs->parent_virq, irqlevel);
+ qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
+}
+
+static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
+ uint64_t value = cs->ich_apr[grp][regno];
+
+ trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
+
+ trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+
+ cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
+ uint64_t bpr;
+ bool satinc = false;
+
+ if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
+ /* reads return bpr0 + 1 saturated to 7, writes ignored */
+ grp = GICV3_G0;
+ satinc = true;
+ }
+
+ bpr = read_vbpr(cs, grp);
+
+ if (satinc) {
+ bpr++;
+ bpr = MIN(bpr, 7);
+ }
+
+ trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
+
+ return bpr;
+}
+
+static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
+
+ trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
+
+ if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
+ /* reads return bpr0 + 1 saturated to 7, writes ignored */
+ return;
+ }
+
+ write_vbpr(cs, grp, value);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH);
+
+ trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
+
+ value &= icv_fullprio_mask(cs);
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH, value);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int enbit;
+ uint64_t value;
+
+ enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
+ value = extract64(cs->ich_vmcr_el2, enbit, 1);
+
+ trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int enbit;
+
+ trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
+
+ enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
+ * should match the ones reported in ich_vtr_read().
+ */
+ value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
+ (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+
+ if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
+ value |= ICC_CTLR_EL1_EOIMODE;
+ }
+
+ if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
+ value |= ICC_CTLR_EL1_CBPR;
+ }
+
+ trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
+ 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
+ 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int prio = ich_highest_active_virt_prio(cs);
+
+ trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
+ return prio;
+}
+
+static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx = hppvi_index(cs);
+ uint64_t value = INTID_SPURIOUS;
+
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ if (grp == thisgrp) {
+ value = ich_lr_vintid(lr);
+ }
+ }
+
+ trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
+{
+ /* Activate the interrupt in the specified list register
+ * by moving it from Pending to Active state, and update the
+ * Active Priority Registers.
+ */
+ uint32_t mask = icv_gprio_mask(cs, grp);
+ int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
+ int aprbit = prio >> (8 - cs->vprebits);
+ int regno = aprbit / 32;
+ int regbit = aprbit % 32;
+
+ cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
+ cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
+ cs->ich_apr[grp][regno] |= (1 << regbit);
+}
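+
+ /*
+ * Worked example (assuming cs->vprebits == 5): for a group priority
+ * of 0x18, aprbit is 0x18 >> 3 == 3, so bit 3 of ich_apr[grp][0] is
+ * set when the interrupt becomes active.
+ */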
+
+static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx = hppvi_index(cs);
+ uint64_t intid = INTID_SPURIOUS;
+
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
+ intid = ich_lr_vintid(lr);
+ if (!gicv3_intid_is_special(intid)) {
+ icv_activate_irq(cs, idx, grp);
+ } else {
+ /* Interrupt goes from Pending to Invalid */
+ cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
+ /* We will now return the (bogus) ID from the list register,
+ * as per the pseudocode.
+ */
+ }
+ }
+ }
+
+ trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), intid);
+
+ gicv3_cpuif_virt_update(cs);
+
+ return intid;
+}
+
+static int icc_highest_active_prio(GICv3CPUState *cs)
+{
+ /* Calculate the current running priority based on the set bits
+ * in the Active Priority Registers.
+ */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
+ uint32_t apr = cs->icc_apr[GICV3_G0][i] |
+ cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
+
+ if (!apr) {
+ continue;
+ }
+ return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
+ }
+ /* No current active interrupts: return idle priority */
+ return 0xff;
+}
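+
+ /*
+ * For example, with GIC_MIN_BPR == 0 the shift is 1: if the lowest
+ * set bit across the three APR banks is bit 5 of word 0, the running
+ * priority is 5 << 1 == 0x0a.
+ */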
+
+static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
+{
+ /* Return a mask word which clears the subpriority bits from
+ * a priority value for an interrupt in the specified group.
+ * This depends on the BPR value. For CBPR0 (S or NS):
+ * a BPR of 0 means the group priority bits are [7:1];
+ * a BPR of 1 means they are [7:2], and so on down to
+ * a BPR of 7 meaning no group priority bits at all.
+ * For CBPR1 NS:
+ * a BPR of 0 is impossible (the minimum value is 1)
+ * a BPR of 1 means the group priority bits are [7:1];
+ * a BPR of 2 means they are [7:2], and so on down to
+ * a BPR of 7 meaning the group priority is [7].
+ *
+ * Which BPR to use depends on the group of the interrupt and
+ * the current ICC_CTLR.CBPR settings.
+ *
+ * This corresponds to the GroupBits() pseudocode.
+ */
+ int bpr;
+
+ if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
+ (group == GICV3_G1NS &&
+ cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
+ group = GICV3_G0;
+ }
+
+ bpr = cs->icc_bpr[group] & 7;
+
+ if (group == GICV3_G1NS) {
+ assert(bpr > 0);
+ bpr--;
+ }
+
+ return ~0U << (bpr + 1);
+}
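+
+ /*
+ * For example, with an effective BPR of 3 the mask is ~0U << 4 ==
+ * 0xf0, so priorities 0x42 and 0x47 share the group priority 0x40
+ * and differ only in subpriority.
+ */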
+
+static bool icc_no_enabled_hppi(GICv3CPUState *cs)
+{
+ /* Return true if there is no pending interrupt, or the
+ * highest priority pending interrupt is in a group which has been
+ * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
+ */
+ return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
+}
+
+static bool icc_hppi_can_preempt(GICv3CPUState *cs)
+{
+ /* Return true if we have a pending interrupt of sufficient
+ * priority to preempt.
+ */
+ int rprio;
+ uint32_t mask;
+
+ if (icc_no_enabled_hppi(cs)) {
+ return false;
+ }
+
+ if (cs->hppi.prio >= cs->icc_pmr_el1) {
+ /* Priority mask masks this interrupt */
+ return false;
+ }
+
+ rprio = icc_highest_active_prio(cs);
+ if (rprio == 0xff) {
+ /* No currently running interrupt so we can preempt */
+ return true;
+ }
+
+ mask = icc_gprio_mask(cs, cs->hppi.grp);
+
+ /* We only preempt a running interrupt if the pending interrupt's
+ * group priority is sufficient (the subpriorities are not considered).
+ */
+ if ((cs->hppi.prio & mask) < (rprio & mask)) {
+ return true;
+ }
+
+ return false;
+}
+
+void gicv3_cpuif_update(GICv3CPUState *cs)
+{
+ /* Tell the CPU about its highest priority pending interrupt */
+ int irqlevel = 0;
+ int fiqlevel = 0;
+ ARMCPU *cpu = ARM_CPU(cs->cpu);
+ CPUARMState *env = &cpu->env;
+
+ g_assert(qemu_mutex_iothread_locked());
+
+ trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
+ cs->hppi.grp, cs->hppi.prio);
+
+ if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
+ /* If a Security-enabled GIC sends a G1S interrupt to a
+ * Security-disabled CPU, we must treat it as if it were G0.
+ */
+ cs->hppi.grp = GICV3_G0;
+ }
+
+ if (icc_hppi_can_preempt(cs)) {
+ /* We have an interrupt: should we signal it as IRQ or FIQ?
+ * This is described in the GICv3 spec section 4.6.2.
+ */
+ bool isfiq;
+
+ switch (cs->hppi.grp) {
+ case GICV3_G0:
+ isfiq = true;
+ break;
+ case GICV3_G1:
+ isfiq = (!arm_is_secure(env) ||
+ (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
+ break;
+ case GICV3_G1NS:
+ isfiq = arm_is_secure(env);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (isfiq) {
+ fiqlevel = 1;
+ } else {
+ irqlevel = 1;
+ }
+ }
+
+ trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
+
+ qemu_set_irq(cs->parent_fiq, fiqlevel);
+ qemu_set_irq(cs->parent_irq, irqlevel);
+}
+
+static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint32_t value = cs->icc_pmr_el1;
+
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_pmr_read(env, ri);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
+ (env->cp15.scr_el3 & SCR_FIQ)) {
+ /* NS access and Group 0 is inaccessible to NS: return the
+ * NS view of the current priority
+ */
+ if ((value & 0x80) == 0) {
+ /* Secure priorities not visible to NS */
+ value = 0;
+ } else if (value != 0xff) {
+ value = (value << 1) & 0xff;
+ }
+ }
+
+ trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);
+
+ return value;
+}
+
+static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_pmr_write(env, ri, value);
+ }
+
+ trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
+
+ value &= 0xff;
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
+ (env->cp15.scr_el3 & SCR_FIQ)) {
+ /* NS access and Group 0 is inaccessible to NS: the written
+ * value is the NS view and must be folded into the Non-secure
+ * half (0x80..0xff) of the priority range
+ */
+ if (!(cs->icc_pmr_el1 & 0x80)) {
+ /* Current PMR in the secure range, don't allow NS to change it */
+ return;
+ }
+ value = (value >> 1) | 0x80;
+ }
+ cs->icc_pmr_el1 = value;
+ gicv3_cpuif_update(cs);
+}
+
+static void icc_activate_irq(GICv3CPUState *cs, int irq)
+{
+ /* Move the interrupt from the Pending state to Active, and update
+ * the Active Priority Registers
+ */
+ uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
+ int prio = cs->hppi.prio & mask;
+ int aprbit = prio >> 1;
+ int regno = aprbit / 32;
+ int regbit = aprbit % 32;
+
+ cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
+
+ if (irq < GIC_INTERNAL) {
+ cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
+ cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
+ gicv3_redist_update(cs);
+ } else if (irq < GICV3_LPI_INTID_START) {
+ gicv3_gicd_active_set(cs->gic, irq);
+ gicv3_gicd_pending_clear(cs->gic, irq);
+ gicv3_update(cs->gic, irq, 1);
+ } else {
+ gicv3_redist_lpi_pending(cs, irq, 0);
+ }
+}
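+
+ /*
+ * Worked example: for a physical interrupt with group priority 0x30
+ * the APR bit is 0x30 >> 1 == 24, i.e. bit 24 of icc_apr[grp][0].
+ */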
+
+static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
+{
+ /* Return the highest priority pending interrupt register value
+ * for group 0.
+ */
+ bool irq_is_secure;
+
+ if (cs->hppi.prio == 0xff) {
+ return INTID_SPURIOUS;
+ }
+
+ /* Check whether we can return the interrupt or if we should return
+ * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
+ * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
+ * is always zero.)
+ */
+ irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
+ (cs->hppi.grp != GICV3_G1NS));
+
+ if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
+ return INTID_SPURIOUS;
+ }
+ if (irq_is_secure && !arm_is_secure(env)) {
+ /* Secure interrupts not visible to Nonsecure */
+ return INTID_SPURIOUS;
+ }
+
+ if (cs->hppi.grp != GICV3_G0) {
+ /* Indicate to EL3 that there's a Group 1 interrupt for the other
+ * state pending.
+ */
+ return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
+ }
+
+ return cs->hppi.irq;
+}
+
+static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
+{
+ /* Return the highest priority pending interrupt register value
+ * for group 1.
+ */
+ bool irq_is_secure;
+
+ if (cs->hppi.prio == 0xff) {
+ return INTID_SPURIOUS;
+ }
+
+ /* Check whether we can return the interrupt or if we should return
+ * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
+ * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
+ * is always zero.)
+ */
+ irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
+ (cs->hppi.grp != GICV3_G1NS));
+
+ if (cs->hppi.grp == GICV3_G0) {
+ /* Group 0 interrupts not visible via HPPIR1 */
+ return INTID_SPURIOUS;
+ }
+ if (irq_is_secure) {
+ if (!arm_is_secure(env)) {
+ /* Secure interrupts not visible in Non-secure */
+ return INTID_SPURIOUS;
+ }
+ } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
+ /* Group 1 non-secure interrupts not visible in Secure EL1 */
+ return INTID_SPURIOUS;
+ }
+
+ return cs->hppi.irq;
+}
+
+static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t intid;
+
+ if (icv_access(env, HCR_FMO)) {
+ return icv_iar_read(env, ri);
+ }
+
+ if (!icc_hppi_can_preempt(cs)) {
+ intid = INTID_SPURIOUS;
+ } else {
+ intid = icc_hppir0_value(cs, env);
+ }
+
+ if (!gicv3_intid_is_special(intid)) {
+ icc_activate_irq(cs, intid);
+ }
+
+ trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
+ return intid;
+}
+
+static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t intid;
+
+ if (icv_access(env, HCR_IMO)) {
+ return icv_iar_read(env, ri);
+ }
+
+ if (!icc_hppi_can_preempt(cs)) {
+ intid = INTID_SPURIOUS;
+ } else {
+ intid = icc_hppir1_value(cs, env);
+ }
+
+ if (!gicv3_intid_is_special(intid)) {
+ icc_activate_irq(cs, intid);
+ }
+
+ trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
+ return intid;
+}
+
+static void icc_drop_prio(GICv3CPUState *cs, int grp)
+{
+ /* Drop the priority of the currently active interrupt in
+ * the specified group.
+ *
+ * Note that we can guarantee (because of the requirement to nest
+ * ICC_IAR reads [which activate an interrupt and raise priority]
+ * with ICC_EOIR writes [which drop the priority for the interrupt])
+ * that the interrupt we're being called for is the highest priority
+ * active interrupt, meaning that it has the lowest set bit in the
+ * APR registers.
+ *
+ * If the guest does not honour the ordering constraints then the
+ * behaviour of the GIC is UNPREDICTABLE, which for us means that
+ * the values of the APR registers might become incorrect and the
+ * running priority will be wrong, so interrupts that should preempt
+ * might not do so, and interrupts that should not preempt might do so.
+ */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
+ uint64_t *papr = &cs->icc_apr[grp][i];
+
+ if (!*papr) {
+ continue;
+ }
+ /* Clear the lowest set bit */
+ *papr &= *papr - 1;
+ break;
+ }
+
+ /* running priority change means we need an update for this cpu i/f */
+ gicv3_cpuif_update(cs);
+}
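+
+ /*
+ * The "*papr &= *papr - 1" step clears only the lowest set bit:
+ * for example an APR word of 0x14 (bits 2 and 4 set) becomes 0x10,
+ * dropping the highest-priority (lowest-numbered) active bit.
+ */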
+
+static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
+{
+ /* Return true if we should split priority drop and interrupt
+ * deactivation, ie whether the relevant EOIMode bit is set.
+ */
+ if (arm_is_el3_or_mon(env)) {
+ return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
+ }
+ if (arm_is_secure_below_el3(env)) {
+ return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
+ } else {
+ return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
+ }
+}
+
+static int icc_highest_active_group(GICv3CPUState *cs)
+{
+ /* Return the group with the highest priority active interrupt.
+ * We can do this by just comparing the APRs to see which one
+ * has the lowest set bit.
+ * (If more than one group is active at the same priority then
+ * we're in UNPREDICTABLE territory.)
+ */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
+ int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
+ int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
+ int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);
+
+ if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
+ return GICV3_G1NS;
+ }
+ if (g1ctz < g0ctz) {
+ return GICV3_G1;
+ }
+ if (g0ctz < 32) {
+ return GICV3_G0;
+ }
+ }
+ /* No set active bits? UNPREDICTABLE; return -1 so the caller
+ * ignores the spurious EOI attempt.
+ */
+ return -1;
+}
+
+static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
+{
+ if (irq < GIC_INTERNAL) {
+ cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
+ gicv3_redist_update(cs);
+ } else {
+ gicv3_gicd_active_clear(cs->gic, irq);
+ gicv3_update(cs->gic, irq, 1);
+ }
+}
+
+static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
+{
+ /* Return true if we should split priority drop and interrupt
+ * deactivation, ie whether the virtual EOIMode bit is set.
+ */
+ return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
+}
+
+static int icv_find_active(GICv3CPUState *cs, int irq)
+{
+ /* Given an interrupt number for an active interrupt, return the index
+ * of the corresponding list register, or -1 if there is no match.
+ * Corresponds to FindActiveVirtualInterrupt pseudocode.
+ */
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
+{
+ /* Deactivate the interrupt in the specified list register index */
+ uint64_t lr = cs->ich_lr_el2[idx];
+
+ if (lr & ICH_LR_EL2_HW) {
+ /* Deactivate the associated physical interrupt */
+ int pirq = ich_lr_pintid(lr);
+
+ if (pirq < INTID_SECURE) {
+ icc_deactivate_irq(cs, pirq);
+ }
+ }
+
+ /* Clear the 'active' part of the state, so ActivePending->Pending
+ * and Active->Invalid.
+ */
+ lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
+ cs->ich_lr_el2[idx] = lr;
+}
+
+static void icv_increment_eoicount(GICv3CPUState *cs)
+{
+ /* Increment the EOICOUNT field in ICH_HCR_EL2 */
+ int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
+ ICH_HCR_EL2_EOICOUNT_LENGTH);
+
+ cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
+ ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
+}
+
+static int icv_drop_prio(GICv3CPUState *cs)
+{
+ /* Drop the priority of the currently active virtual interrupt
+ * (favouring group 0 if there is a set active bit at
+ * the same priority for both group 0 and group 1).
+ * Return the priority value for the bit we just cleared,
+ * or 0xff if no bits were set in the AP registers at all.
+ * Note that though the ich_apr[] are uint64_t only the low
+ * 32 bits are actually relevant.
+ */
+ int i;
+ int aprmax = 1 << (cs->vprebits - 5);
+
+ assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+
+ for (i = 0; i < aprmax; i++) {
+ uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
+ uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
+ int apr0count, apr1count;
+
+ if (!*papr0 && !*papr1) {
+ continue;
+ }
+
+ /* We can't just use the bit-twiddling hack icc_drop_prio() does
+ * because we need to return the bit number we cleared so
+ * it can be compared against the list register's priority field.
+ */
+ apr0count = ctz32(*papr0);
+ apr1count = ctz32(*papr1);
+
+ if (apr0count <= apr1count) {
+ *papr0 &= *papr0 - 1;
+ return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
+ } else {
+ *papr1 &= *papr1 - 1;
+ return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
+ }
+ }
+ return 0xff;
+}
+
+static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Deactivate interrupt */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int idx;
+ int irq = value & 0xffffff;
+
+ trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
+
+ if (irq >= GICV3_MAXIRQ) {
+ /* Also catches special interrupt numbers and LPIs */
+ return;
+ }
+
+ if (!icv_eoi_split(env, cs)) {
+ return;
+ }
+
+ idx = icv_find_active(cs, irq);
+
+ if (idx < 0) {
+ /* No list register matching this, so increment the EOI count
+ * (might trigger a maintenance interrupt)
+ */
+ icv_increment_eoicount(cs);
+ } else {
+ icv_deactivate_irq(cs, idx);
+ }
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* End of Interrupt */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int irq = value & 0xffffff;
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx, dropprio;
+
+ trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
+
+ if (gicv3_intid_is_special(irq)) {
+ return;
+ }
+
+ /* We implement the IMPDEF choice of "drop priority before doing
+ * error checks" (because that lets us avoid scanning the AP
+ * registers twice).
+ */
+ dropprio = icv_drop_prio(cs);
+ if (dropprio == 0xff) {
+ /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
+ * whether the list registers are checked in this
+ * situation; we choose not to.
+ */
+ return;
+ }
+
+ idx = icv_find_active(cs, irq);
+
+ if (idx < 0) {
+ /* No valid list register corresponding to EOI ID */
+ icv_increment_eoicount(cs);
+ } else {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+ int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
+
+ if (thisgrp == grp && lr_gprio == dropprio) {
+ if (!icv_eoi_split(env, cs)) {
+ /* Priority drop and deactivate not split: deactivate irq now */
+ icv_deactivate_irq(cs, idx);
+ }
+ }
+ }
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* End of Interrupt */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int irq = value & 0xffffff;
+ int grp;
+ bool is_eoir0 = ri->crm == 8;
+
+ if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
+ icv_eoir_write(env, ri, value);
+ return;
+ }
+
+ trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
+
+ if ((irq >= cs->gic->num_irq) &&
+ !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
+ /* This handles two cases:
+ * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
+ * to the GICC_EOIR, the GIC ignores that write.
+ * 2. If software writes the number of a non-existent interrupt
+ * this must be a subcase of "value written does not match the last
+ * valid interrupt value read from the Interrupt Acknowledge
+ * register" and so this is UNPREDICTABLE. We choose to ignore it.
+ */
+ return;
+ }
+
+ grp = icc_highest_active_group(cs);
+ switch (grp) {
+ case GICV3_G0:
+ if (!is_eoir0) {
+ return;
+ }
+ if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
+ && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
+ return;
+ }
+ break;
+ case GICV3_G1:
+ if (is_eoir0) {
+ return;
+ }
+ if (!arm_is_secure(env)) {
+ return;
+ }
+ break;
+ case GICV3_G1NS:
+ if (is_eoir0) {
+ return;
+ }
+ if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
+ return;
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: IRQ %d isn't active\n", __func__, irq);
+ return;
+ }
+
+ icc_drop_prio(cs, grp);
+
+ if (!icc_eoi_split(env, cs)) {
+ /* Priority drop and deactivate not split: deactivate irq now */
+ icc_deactivate_irq(cs, irq);
+ }
+}
+
+static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ if (icv_access(env, HCR_FMO)) {
+ return icv_hppir_read(env, ri);
+ }
+
+ value = icc_hppir0_value(cs, env);
+ trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ if (icv_access(env, HCR_IMO)) {
+ return icv_hppir_read(env, ri);
+ }
+
+ value = icc_hppir1_value(cs, env);
+ trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
+ bool satinc = false;
+ uint64_t bpr;
+
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_bpr_read(env, ri);
+ }
+
+ if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
+ grp = GICV3_G1NS;
+ }
+
+ if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
+ (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
+ /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
+ * modify BPR0
+ */
+ grp = GICV3_G0;
+ }
+
+ if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
+ (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
+ /* reads return bpr0 + 1 sat to 7, writes ignored */
+ grp = GICV3_G0;
+ satinc = true;
+ }
+
+ bpr = cs->icc_bpr[grp];
+ if (satinc) {
+ bpr++;
+ bpr = MIN(bpr, 7);
+ }
+
+ trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
+
+ return bpr;
+}
+
+static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
+ uint64_t minval;
+
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_bpr_write(env, ri, value);
+ return;
+ }
+
+ trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
+
+ if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
+ grp = GICV3_G1NS;
+ }
+
+ if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
+ (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
+ /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
+ * modify BPR0
+ */
+ grp = GICV3_G0;
+ }
+
+ if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
+ (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
+ /* reads return bpr0 + 1 sat to 7, writes ignored */
+ return;
+ }
+
+ minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR;
+ if (value < minval) {
+ value = minval;
+ }
+
+ cs->icc_bpr[grp] = value & 7;
+ gicv3_cpuif_update(cs);
+}
+
+static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ int regno = ri->opc2 & 3;
+ int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
+
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_ap_read(env, ri);
+ }
+
+ if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
+ grp = GICV3_G1NS;
+ }
+
+ value = cs->icc_apr[grp][regno];
+
+ trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ int regno = ri->opc2 & 3;
+ int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
+
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_ap_write(env, ri, value);
+ return;
+ }
+
+ trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+
+ if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
+ grp = GICV3_G1NS;
+ }
+
+ /* It's not possible to claim that a Non-secure interrupt is active
+ * at a priority outside the Non-secure range (128..255), because
+ * allowing that would let malicious NS code block delivery of Secure
+ * interrupts by writing a bad value to these registers.
+ */
+ if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
+ return;
+ }
+
+ cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
+ gicv3_cpuif_update(cs);
+}
+
+static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Deactivate interrupt */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int irq = value & 0xffffff;
+ bool irq_is_secure, single_sec_state, irq_is_grp0;
+ bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
+
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ icv_dir_write(env, ri, value);
+ return;
+ }
+
+ trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
+
+ if (irq >= cs->gic->num_irq) {
+ /* Also catches special interrupt numbers and LPIs */
+ return;
+ }
+
+ if (!icc_eoi_split(env, cs)) {
+ return;
+ }
+
+ int grp = gicv3_irq_group(cs->gic, cs, irq);
+
+ single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
+ irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
+ irq_is_grp0 = grp == GICV3_G0;
+
+ /* Check whether we're allowed to deactivate this interrupt based
+ * on its group and the current CPU state.
+ * These checks are laid out to correspond to the spec's pseudocode.
+ */
+ route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
+ route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
+ /* No need to include !IsSecure in route_*_to_el2 as it's only
+ * tested in cases where we know !IsSecure is true.
+ */
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+ route_fiq_to_el2 = hcr_el2 & HCR_FMO;
+ route_irq_to_el2 = hcr_el2 & HCR_IMO;
+
+ switch (arm_current_el(env)) {
+ case 3:
+ break;
+ case 2:
+ if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
+ break;
+ }
+ if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
+ break;
+ }
+ return;
+ case 1:
+ if (!arm_is_secure_below_el3(env)) {
+ if (single_sec_state && irq_is_grp0 &&
+ !route_fiq_to_el3 && !route_fiq_to_el2) {
+ break;
+ }
+ if (!irq_is_secure && !irq_is_grp0 &&
+ !route_irq_to_el3 && !route_irq_to_el2) {
+ break;
+ }
+ } else {
+ if (irq_is_grp0 && !route_fiq_to_el3) {
+ break;
+ }
+ if (!irq_is_grp0 &&
+ (!irq_is_secure || !single_sec_state) &&
+ !route_irq_to_el3) {
+ break;
+ }
+ }
+ return;
+ default:
+ g_assert_not_reached();
+ }
+
+ icc_deactivate_irq(cs, irq);
+}
+
+static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int prio;
+
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_rpr_read(env, ri);
+ }
+
+ prio = icc_highest_active_prio(cs);
+
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
+ /* NS GIC access and Group 0 is inaccessible to NS */
+ if ((prio & 0x80) == 0) {
+ /* NS mustn't see priorities in the Secure half of the range */
+ prio = 0;
+ } else if (prio != 0xff) {
+ /* Non-idle priority: show the Non-secure view of it */
+ prio = (prio << 1) & 0xff;
+ }
+ }
+
+ trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
+ return prio;
+}
+
+static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
+ uint64_t value, int grp, bool ns)
+{
+ GICv3State *s = cs->gic;
+
+ /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
+ uint64_t aff = extract64(value, 48, 8) << 16 |
+ extract64(value, 32, 8) << 8 |
+ extract64(value, 16, 8);
+ uint32_t targetlist = extract64(value, 0, 16);
+ uint32_t irq = extract64(value, 24, 4);
+ bool irm = extract64(value, 40, 1);
+ int i;
+
+ if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
+ /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
+ * interrupts as Group 0 interrupts and must send Secure Group 0
+ * interrupts to the target CPUs.
+ */
+ grp = GICV3_G0;
+ }
+
+ trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
+ aff, targetlist);
+
+ for (i = 0; i < s->num_cpu; i++) {
+ GICv3CPUState *ocs = &s->cpu[i];
+
+ if (irm) {
+ /* IRM == 1 : route to all CPUs except self */
+ if (cs == ocs) {
+ continue;
+ }
+ } else {
+ /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
+ * where the corresponding bit is set in targetlist
+ */
+ int aff0;
+
+ if (ocs->gicr_typer >> 40 != aff) {
+ continue;
+ }
+ aff0 = extract64(ocs->gicr_typer, 32, 8);
+ if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
+ continue;
+ }
+ }
+
+ /* The redistributor will check against its own GICR_NSACR as needed */
+ gicv3_redist_send_sgi(ocs, grp, irq, ns);
+ }
+}
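+
+ /*
+ * For example, a write of ((uint64_t)2 << 24) | 0x5 to ICC_SGI1R_EL1
+ * encodes IRM == 0, Aff3.Aff2.Aff1 == 0.0.0, INTID == 2 and a target
+ * list of 0x5, so SGI 2 is delivered to the CPUs whose affinity is
+ * 0.0.0.0 and 0.0.0.2 (subject to the redistributor's own checks).
+ */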
+
+static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Generate Secure Group 0 SGI. */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ bool ns = !arm_is_secure(env);
+
+ icc_generate_sgi(env, cs, value, GICV3_G0, ns);
+}
+
+static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Generate Group 1 SGI for the current Security state */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp;
+ bool ns = !arm_is_secure(env);
+
+ grp = ns ? GICV3_G1NS : GICV3_G1;
+ icc_generate_sgi(env, cs, value, grp, ns);
+}
+
+static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Generate Group 1 SGI for the Security state that is not
+ * the current state
+ */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp;
+ bool ns = !arm_is_secure(env);
+
+ grp = ns ? GICV3_G1 : GICV3_G1NS;
+ icc_generate_sgi(env, cs, value, grp, ns);
+}
+
+static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
+ uint64_t value;
+
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_igrpen_read(env, ri);
+ }
+
+ if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
+ grp = GICV3_G1NS;
+ }
+
+ value = cs->icc_igrpen[grp];
+ trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
+
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_igrpen_write(env, ri, value);
+ return;
+ }
+
+ trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
+
+ if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
+ grp = GICV3_G1NS;
+ }
+
+ cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
+ gicv3_cpuif_update(cs);
+}
+
+static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
+ value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
+ trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
+
+ /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
+ cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
+ cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
+ gicv3_cpuif_update(cs);
+}
+
+static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
+ uint64_t value;
+
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_ctlr_read(env, ri);
+ }
+
+ value = cs->icc_ctlr_el1[bank];
+ trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
+ uint64_t mask;
+
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ icv_ctlr_write(env, ri, value);
+ return;
+ }
+
+ trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
+
+ /* Only CBPR and EOIMODE can be RW;
+ * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
+ * the associated priority-based routing of them);
+ * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
+ mask = ICC_CTLR_EL1_EOIMODE;
+ } else {
+ mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
+ }
+
+ cs->icc_ctlr_el1[bank] &= ~mask;
+ cs->icc_ctlr_el1[bank] |= (value & mask);
+ gicv3_cpuif_update(cs);
+}
+
+
+static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ value = cs->icc_ctlr_el3;
+ if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
+ value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
+ }
+ if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
+ value |= ICC_CTLR_EL3_CBPR_EL1NS;
+ }
+ if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
+ value |= ICC_CTLR_EL3_EOIMODE_EL1S;
+ }
+ if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
+ value |= ICC_CTLR_EL3_CBPR_EL1S;
+ }
+
+ trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t mask;
+
+ trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
+
+ /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
+ cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
+ if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
+ cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
+ }
+ if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
+ cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
+ }
+
+ cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
+ if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
+ cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
+ }
+ if (value & ICC_CTLR_EL3_CBPR_EL1S) {
+ cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
+ }
+
+ /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
+ mask = ICC_CTLR_EL3_EOIMODE_EL3;
+
+ cs->icc_ctlr_el3 &= ~mask;
+ cs->icc_ctlr_el3 |= (value & mask);
+ gicv3_cpuif_update(cs);
+}
+
+static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
+ switch (el) {
+ case 1:
+ /* Note that arm_hcr_el2_eff takes secure state into account. */
+ if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
+ r = CP_ACCESS_TRAP_EL3;
+ }
+ break;
+ case 2:
+ r = CP_ACCESS_TRAP_EL3;
+ break;
+ case 3:
+ if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
+ r = CP_ACCESS_TRAP_EL3;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
+ r = CP_ACCESS_TRAP;
+ }
+ return r;
+}
+
+static CPAccessResult gicv3_dir_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
+ arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return gicv3_irqfiq_access(env, ri, isread);
+}
+
+static CPAccessResult gicv3_sgi_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if (arm_current_el(env) == 1 &&
+ (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return gicv3_irqfiq_access(env, ri, isread);
+}
+
+static CPAccessResult gicv3_fiq_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ if (env->cp15.scr_el3 & SCR_FIQ) {
+ switch (el) {
+ case 1:
+ if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
+ r = CP_ACCESS_TRAP_EL3;
+ }
+ break;
+ case 2:
+ r = CP_ACCESS_TRAP_EL3;
+ break;
+ case 3:
+ if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
+ r = CP_ACCESS_TRAP_EL3;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
+ r = CP_ACCESS_TRAP;
+ }
+ return r;
+}
+
+static CPAccessResult gicv3_irq_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ if (env->cp15.scr_el3 & SCR_IRQ) {
+ switch (el) {
+ case 1:
+ if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
+ r = CP_ACCESS_TRAP_EL3;
+ }
+ break;
+ case 2:
+ r = CP_ACCESS_TRAP_EL3;
+ break;
+ case 3:
+ if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
+ r = CP_ACCESS_TRAP_EL3;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
+ r = CP_ACCESS_TRAP;
+ }
+ return r;
+}
+
+static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
+ (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
+ (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+ cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
+ (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
+ (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+ cs->icc_pmr_el1 = 0;
+ cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
+ cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
+ cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
+ memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
+ memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
+ cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
+ (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
+ (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
+
+ memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
+ cs->ich_hcr_el2 = 0;
+ memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
+ cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
+ ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
+ (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
+}
+
+static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
+ { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
+ .readfn = icc_pmr_read,
+ .writefn = icc_pmr_write,
+ /* We hang the whole cpu interface reset routine off here
+ * rather than parcelling it out into one little function
+ * per register
+ */
+ .resetfn = icc_reset,
+ },
+ { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_R, .accessfn = gicv3_fiq_access,
+ .readfn = icc_iar0_read,
+ },
+ { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_fiq_access,
+ .writefn = icc_eoir_write,
+ },
+ { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_R, .accessfn = gicv3_fiq_access,
+ .readfn = icc_hppir0_read,
+ },
+ { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_bpr_read,
+ .writefn = icc_bpr_write,
+ },
+ { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ /* All the ICC_AP1R*_EL1 registers are banked */
+ { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_dir_access,
+ .writefn = icc_dir_write,
+ },
+ { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_R, .accessfn = gicv3_irqfiq_access,
+ .readfn = icc_rpr_read,
+ },
+ { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
+ .writefn = icc_sgi1r_write,
+ },
+ { .name = "ICC_SGI1R",
+ .cp = 15, .opc1 = 0, .crm = 12,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
+ .writefn = icc_sgi1r_write,
+ },
+ { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
+ .writefn = icc_asgi1r_write,
+ },
+ { .name = "ICC_ASGI1R",
+ .cp = 15, .opc1 = 1, .crm = 12,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
+ .writefn = icc_asgi1r_write,
+ },
+ { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
+ .writefn = icc_sgi0r_write,
+ },
+ { .name = "ICC_SGI0R",
+ .cp = 15, .opc1 = 2, .crm = 12,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
+ .writefn = icc_sgi0r_write,
+ },
+ { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_R, .accessfn = gicv3_irq_access,
+ .readfn = icc_iar1_read,
+ },
+ { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_W, .accessfn = gicv3_irq_access,
+ .writefn = icc_eoir_write,
+ },
+ { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_R, .accessfn = gicv3_irq_access,
+ .readfn = icc_hppir1_read,
+ },
+ /* This register is banked */
+ { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_bpr_read,
+ .writefn = icc_bpr_write,
+ },
+ /* This register is banked */
+ { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
+ .readfn = icc_ctlr_el1_read,
+ .writefn = icc_ctlr_el1_write,
+ },
+ { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
+ .type = ARM_CP_NO_RAW | ARM_CP_CONST,
+ .access = PL1_RW,
+ /* We don't support IRQ/FIQ bypass and system registers are
+ * always enabled, so all our bits are RAZ/WI or RAO/WI.
+ * This register is banked but since it's constant we don't
+ * need to do anything special.
+ */
+ .resetvalue = 0x7,
+ },
+ { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_igrpen_read,
+ .writefn = icc_igrpen_write,
+ },
+ /* This register is banked */
+ { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_igrpen_read,
+ .writefn = icc_igrpen_write,
+ },
+ { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
+ .type = ARM_CP_NO_RAW | ARM_CP_CONST,
+ .access = PL2_RW,
+ /* We don't support IRQ/FIQ bypass and system registers are
+ * always enabled, so all our bits are RAZ/WI or RAO/WI.
+ */
+ .resetvalue = 0xf,
+ },
+ { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL3_RW,
+ .readfn = icc_ctlr_el3_read,
+ .writefn = icc_ctlr_el3_write,
+ },
+ { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
+ .type = ARM_CP_NO_RAW | ARM_CP_CONST,
+ .access = PL3_RW,
+ /* We don't support IRQ/FIQ bypass and system registers are
+ * always enabled, so all our bits are RAZ/WI or RAO/WI.
+ */
+ .resetvalue = 0xf,
+ },
+ { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL3_RW,
+ .readfn = icc_igrpen1_el3_read,
+ .writefn = icc_igrpen1_el3_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
+ uint64_t value;
+
+ value = cs->ich_apr[grp][regno];
+ trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
+
+ trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+
+ cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = cs->ich_hcr_el2;
+
+ trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
+
+ value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
+ ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
+ ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
+ ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
+ ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
+
+ cs->ich_hcr_el2 = value;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = cs->ich_vmcr_el2;
+
+ trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
+
+ value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
+ ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
+ ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
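+    /* VFIQEn is RES1 when ICC_SRE_EL1.SRE is 1 (which for this
+     * implementation it always is: ICC_SRE_EL1 is constant 0x7 above),
+     * so the bit is forced to 1 here and Group 0 virtual interrupts are
+     * always signalled as virtual FIQs.
+     */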
+ value |= ICH_VMCR_EL2_VFIQEN;
+
+ cs->ich_vmcr_el2 = value;
+ /* Enforce "writing BPRs to less than minimum sets them to the minimum"
+ * by reading and writing back the fields.
+ */
+ write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
+ write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 | ((ri->crm & 1) << 3);
+ uint64_t value;
+
+ /* This read function handles all of:
+ * 64-bit reads of the whole LR
+ * 32-bit reads of the low half of the LR
+ * 32-bit reads of the high half of the LR
+ */
+ if (ri->state == ARM_CP_STATE_AA32) {
+ if (ri->crm >= 14) {
+ value = extract64(cs->ich_lr_el2[regno], 32, 32);
+ trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
+ } else {
+ value = extract64(cs->ich_lr_el2[regno], 0, 32);
+ trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
+ }
+ } else {
+ value = cs->ich_lr_el2[regno];
+ trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
+ }
+
+ return value;
+}
+
+static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 | ((ri->crm & 1) << 3);
+
+ /* This write function handles all of:
+ * 64-bit writes to the whole LR
+ * 32-bit writes to the low half of the LR
+ * 32-bit writes to the high half of the LR
+ */
+ if (ri->state == ARM_CP_STATE_AA32) {
+ if (ri->crm >= 14) {
+ trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
+ value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
+ } else {
+ trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
+ value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
+ }
+ } else {
+ trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
+ }
+
+ /* Enforce RES0 bits in priority field */
+ if (cs->vpribits < 8) {
+ value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
+ 8 - cs->vpribits, 0);
+ }
+
+ cs->ich_lr_el2[regno] = value;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
+ | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
+ | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
+ | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
+ | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
+
+ trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = maintenance_interrupt_state(cs);
+
+ trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
+
+ trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = 0;
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
+ ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
+ value |= (1 << i);
+ }
+ }
+
+ trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
+ { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_hcr_read,
+ .writefn = ich_hcr_write,
+ },
+ { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_vtr_read,
+ },
+ { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_misr_read,
+ },
+ { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_eisr_read,
+ },
+ { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_elrsr_read,
+ },
+ { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_vmcr_read,
+ .writefn = ich_vmcr_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
+ { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
+ { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
+{
+ GICv3CPUState *cs = opaque;
+
+ gicv3_cpuif_update(cs);
+}
+
+void gicv3_init_cpuif(GICv3State *s)
+{
+ /* Called from the GICv3 realize function; register our system
+ * registers with the CPU
+ */
+ int i;
+
+ for (i = 0; i < s->num_cpu; i++) {
+ ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
+ GICv3CPUState *cs = &s->cpu[i];
+
+ /* Note that we can't just use the GICv3CPUState as an opaque pointer
+ * in define_arm_cp_regs_with_opaque(), because when we're called back
+ * it might be with code translated by CPU 0 but run by CPU 1, in
+ * which case we'd get the wrong value.
+ * So instead we define the regs with no ri->opaque info, and
+ * get back to the GICv3CPUState from the CPUARMState.
+ */
+ define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
+ && cpu->gic_num_lrs) {
+ int j;
+
+ cs->num_list_regs = cpu->gic_num_lrs;
+ cs->vpribits = cpu->gic_vpribits;
+ cs->vprebits = cpu->gic_vprebits;
+
+ /* Check against architectural constraints: getting these
+ * wrong would be a bug in the CPU code defining these,
+ * and the implementation relies on them holding.
+ */
+ g_assert(cs->vprebits <= cs->vpribits);
+ g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
+ g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
+
+ define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
+
+ for (j = 0; j < cs->num_list_regs; j++) {
+ /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
+ * are split into two cp15 regs, LR (the low part, with the
+ * same encoding as the AArch64 LR) and LRC (the high part).
+ */
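+                /* With the encodings below, LRs 0..7 use crm 12 (their
+                 * AArch32 LRC halves use crm 14) and LRs 8..15 use crm 13
+                 * (LRC at crm 15), with opc2 = j & 7 selecting the
+                 * register within each group of eight.
+                 */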
+ ARMCPRegInfo lr_regset[] = {
+ { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12,
+ .crm = 12 + (j >> 3), .opc2 = j & 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_lr_read,
+ .writefn = ich_lr_write,
+ },
+ { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 12,
+ .crm = 14 + (j >> 3), .opc2 = j & 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_lr_read,
+ .writefn = ich_lr_write,
+ },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, lr_regset);
+ }
+ if (cs->vprebits >= 6) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
+ }
+ if (cs->vprebits == 7) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
+ }
+ }
+ arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
+ }
+}
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
new file mode 100644
index 000000000..4164500ea
--- /dev/null
+++ b/hw/intc/arm_gicv3_dist.c
@@ -0,0 +1,914 @@
+/*
+ * ARM GICv3 emulation: Distributor
+ *
+ * Copyright (c) 2015 Huawei.
+ * Copyright (c) 2016 Linaro Limited.
+ * Written by Shlomo Pongratz, Peter Maydell
+ *
+ * This code is licensed under the GPL, version 2 or (at your option)
+ * any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "gicv3_internal.h"
+
+/* The GICD_NSACR registers contain a two bit field for each interrupt which
+ * allows the guest to give NonSecure code access to registers controlling
+ * Secure interrupts:
+ * 0b00: no access (NS accesses to bits for Secure interrupts will RAZ/WI)
+ * 0b01: NS r/w accesses permitted to ISPENDR, SETSPI_NSR, SGIR
+ * 0b10: as 0b01, and also r/w to ICPENDR, r/o to ISACTIVER/ICACTIVER,
+ * and w/o to CLRSPI_NSR
+ * 0b11: as 0b10, and also r/w to IROUTER and ITARGETSR
+ *
+ * Given a (multiple-of-32) interrupt number, these mask functions return
+ * a mask word where each bit is 1 if the NSACR settings permit access
+ * to the interrupt. The mask returned can then be ORed with the GICD_GROUP
+ * word for this set of interrupts to give an overall mask.
+ */
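+
+/*
+ * For example, if GICD_NSACR gives interrupt n the field value 0b10 and
+ * interrupt n+1 the value 0b01, the low bits of raw_nsacr are 0b0110.
+ * mask_nsacr_ge1() computes (raw_nsacr >> 1) | raw_nsacr = 0b0111, whose
+ * even bits (gathered by half_unshuffle64()) set mask bits n and n+1;
+ * mask_nsacr_ge2() computes raw_nsacr >> 1 = 0b0011, whose even bits set
+ * only mask bit n.
+ */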
+
+typedef uint32_t maskfn(GICv3State *s, int irq);
+
+static uint32_t mask_nsacr_ge1(GICv3State *s, int irq)
+{
+ /* Return a mask where each bit is set if the NSACR field is >= 1 */
+ uint64_t raw_nsacr = s->gicd_nsacr[irq / 16 + 1];
+
+ raw_nsacr = raw_nsacr << 32 | s->gicd_nsacr[irq / 16];
+ raw_nsacr = (raw_nsacr >> 1) | raw_nsacr;
+ return half_unshuffle64(raw_nsacr);
+}
+
+static uint32_t mask_nsacr_ge2(GICv3State *s, int irq)
+{
+ /* Return a mask where each bit is set if the NSACR field is >= 2 */
+ uint64_t raw_nsacr = s->gicd_nsacr[irq / 16 + 1];
+
+ raw_nsacr = raw_nsacr << 32 | s->gicd_nsacr[irq / 16];
+ raw_nsacr = raw_nsacr >> 1;
+ return half_unshuffle64(raw_nsacr);
+}
+
+/* We don't need a mask_nsacr_ge3() because IROUTER<n> isn't a bitmap register,
+ * but it would be implemented using:
+ * raw_nsacr = (raw_nsacr >> 1) & raw_nsacr;
+ */
+
+static uint32_t mask_group_and_nsacr(GICv3State *s, MemTxAttrs attrs,
+ maskfn *maskfn, int irq)
+{
+ /* Return a 32-bit mask which should be applied for this set of 32
+ * interrupts; each bit is 1 if access is permitted by the
+ * combination of attrs.secure, GICD_GROUPR and GICD_NSACR.
+ */
+ uint32_t mask;
+
+ if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
+ /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI
+ * unless the NSACR bits permit access.
+ */
+ mask = *gic_bmp_ptr32(s->group, irq);
+ if (maskfn) {
+ mask |= maskfn(s, irq);
+ }
+ return mask;
+ }
+ return 0xFFFFFFFFU;
+}
+
+static int gicd_ns_access(GICv3State *s, int irq)
+{
+ /* Return the 2 bit NS_access<x> field from GICD_NSACR<n> for the
+ * specified interrupt.
+ */
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return 0;
+ }
+ return extract32(s->gicd_nsacr[irq / 16], (irq % 16) * 2, 2);
+}
+
+static void gicd_write_set_bitmap_reg(GICv3State *s, MemTxAttrs attrs,
+ uint32_t *bmp,
+ maskfn *maskfn,
+ int offset, uint32_t val)
+{
+ /* Helper routine to implement writing to a "set-bitmap" register
+ * (GICD_ISENABLER, GICD_ISPENDR, etc).
+ * Semantics implemented here:
+ * RAZ/WI for SGIs, PPIs, unimplemented IRQs
+ * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI.
+ * Writing 1 means "set bit in bitmap"; writing 0 is ignored.
+ * offset should be the offset in bytes of the register from the start
+ * of its group.
+ */
+ int irq = offset * 8;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return;
+ }
+ val &= mask_group_and_nsacr(s, attrs, maskfn, irq);
+ *gic_bmp_ptr32(bmp, irq) |= val;
+ gicv3_update(s, irq, 32);
+}
+
+static void gicd_write_clear_bitmap_reg(GICv3State *s, MemTxAttrs attrs,
+ uint32_t *bmp,
+ maskfn *maskfn,
+ int offset, uint32_t val)
+{
+ /* Helper routine to implement writing to a "clear-bitmap" register
+ * (GICD_ICENABLER, GICD_ICPENDR, etc).
+ * Semantics implemented here:
+ * RAZ/WI for SGIs, PPIs, unimplemented IRQs
+ * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI.
+ * Writing 1 means "clear bit in bitmap"; writing 0 is ignored.
+ * offset should be the offset in bytes of the register from the start
+ * of its group.
+ */
+ int irq = offset * 8;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return;
+ }
+ val &= mask_group_and_nsacr(s, attrs, maskfn, irq);
+ *gic_bmp_ptr32(bmp, irq) &= ~val;
+ gicv3_update(s, irq, 32);
+}
+
+static uint32_t gicd_read_bitmap_reg(GICv3State *s, MemTxAttrs attrs,
+ uint32_t *bmp,
+ maskfn *maskfn,
+ int offset)
+{
+ /* Helper routine to implement reading a "set/clear-bitmap" register
+ * (GICD_ICENABLER, GICD_ISENABLER, GICD_ICPENDR, etc).
+ * Semantics implemented here:
+ * RAZ/WI for SGIs, PPIs, unimplemented IRQs
+ * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI.
+ * offset should be the offset in bytes of the register from the start
+ * of its group.
+ */
+ int irq = offset * 8;
+ uint32_t val;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return 0;
+ }
+ val = *gic_bmp_ptr32(bmp, irq);
+ if (bmp == s->pending) {
+ /* The PENDING register is a special case -- for level triggered
+ * interrupts, the PENDING state is the logical OR of the state of
+ * the PENDING latch with the input line level.
+ */
+ uint32_t edge = *gic_bmp_ptr32(s->edge_trigger, irq);
+ uint32_t level = *gic_bmp_ptr32(s->level, irq);
+ val |= (~edge & level);
+ }
+ val &= mask_group_and_nsacr(s, attrs, maskfn, irq);
+ return val;
+}
+
+static uint8_t gicd_read_ipriorityr(GICv3State *s, MemTxAttrs attrs, int irq)
+{
+ /* Read the value of GICD_IPRIORITYR<n> for the specified interrupt,
+ * honouring security state (these are RAZ/WI for Group 0 or Secure
+ * Group 1 interrupts).
+ */
+ uint32_t prio;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return 0;
+ }
+
+ prio = s->gicd_ipriority[irq];
+
+ if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
+ if (!gicv3_gicd_group_test(s, irq)) {
+ /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
+ return 0;
+ }
+ /* NS view of the interrupt priority */
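+        /* e.g. a stored (Secure view) priority of 0x80 reads back as 0x00
+         * in the NS view and 0xd0 reads back as 0xa0, mirroring the NS
+         * write path below which stores 0x80 | (value >> 1).
+         */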
+ prio = (prio << 1) & 0xff;
+ }
+ return prio;
+}
+
+static void gicd_write_ipriorityr(GICv3State *s, MemTxAttrs attrs, int irq,
+ uint8_t value)
+{
+ /* Write the value of GICD_IPRIORITYR<n> for the specified interrupt,
+ * honouring security state (these are RAZ/WI for Group 0 or Secure
+ * Group 1 interrupts).
+ */
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return;
+ }
+
+ if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
+ if (!gicv3_gicd_group_test(s, irq)) {
+ /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
+ return;
+ }
+ /* NS view of the interrupt priority */
+ value = 0x80 | (value >> 1);
+ }
+ s->gicd_ipriority[irq] = value;
+}
+
+static uint64_t gicd_read_irouter(GICv3State *s, MemTxAttrs attrs, int irq)
+{
+ /* Read the value of GICD_IROUTER<n> for the specified interrupt,
+ * honouring security state.
+ */
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return 0;
+ }
+
+ if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
+ /* RAZ/WI for NS accesses to secure interrupts */
+ if (!gicv3_gicd_group_test(s, irq)) {
+ if (gicd_ns_access(s, irq) != 3) {
+ return 0;
+ }
+ }
+ }
+
+ return s->gicd_irouter[irq];
+}
+
+static void gicd_write_irouter(GICv3State *s, MemTxAttrs attrs, int irq,
+ uint64_t val)
+{
+ /* Write the value of GICD_IROUTER<n> for the specified interrupt,
+ * honouring security state.
+ */
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return;
+ }
+
+ if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
+ /* RAZ/WI for NS accesses to secure interrupts */
+ if (!gicv3_gicd_group_test(s, irq)) {
+ if (gicd_ns_access(s, irq) != 3) {
+ return;
+ }
+ }
+ }
+
+ s->gicd_irouter[irq] = val;
+ gicv3_cache_target_cpustate(s, irq);
+ gicv3_update(s, irq, 1);
+}
+
+/**
+ * gicd_readb
+ * gicd_readw
+ * gicd_readl
+ * gicd_readq
+ * gicd_writeb
+ * gicd_writew
+ * gicd_writel
+ * gicd_writeq
+ *
+ * Return %true if the operation succeeded, %false otherwise.
+ */
+
+static bool gicd_readb(GICv3State *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ /* Most GICv3 distributor registers do not support byte accesses. */
+ switch (offset) {
+ case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf:
+ case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
+ case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff:
+ /* This GIC implementation always has affinity routing enabled,
+ * so these registers are all RAZ/WI.
+ */
+ return true;
+ case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff:
+ *data = gicd_read_ipriorityr(s, attrs, offset - GICD_IPRIORITYR);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool gicd_writeb(GICv3State *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ /* Most GICv3 distributor registers do not support byte accesses. */
+ switch (offset) {
+ case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf:
+ case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
+ case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff:
+ /* This GIC implementation always has affinity routing enabled,
+ * so these registers are all RAZ/WI.
+ */
+ return true;
+ case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff:
+ {
+ int irq = offset - GICD_IPRIORITYR;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return true;
+ }
+ gicd_write_ipriorityr(s, attrs, irq, value);
+ gicv3_update(s, irq, 1);
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool gicd_readw(GICv3State *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+    /* Only GICD_SETSPI_NSR, GICD_CLRSPI_NSR, GICD_SETSPI_SR and GICD_CLRSPI_SR
+     * support 16 bit accesses, and those registers are all part of the
+     * optional message-based SPI feature which this GIC does not currently
+     * implement (i.e. for us GICD_TYPER.MBIS == 0), so for us they are
+     * reserved.
+     */
+ return false;
+}
+
+static bool gicd_writew(GICv3State *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+    /* Only GICD_SETSPI_NSR, GICD_CLRSPI_NSR, GICD_SETSPI_SR and GICD_CLRSPI_SR
+     * support 16 bit accesses, and those registers are all part of the
+     * optional message-based SPI feature which this GIC does not currently
+     * implement (i.e. for us GICD_TYPER.MBIS == 0), so for us they are
+     * reserved.
+     */
+ return false;
+}
+
+static bool gicd_readl(GICv3State *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ /* Almost all GICv3 distributor registers are 32-bit.
+ * Note that WO registers must return an UNKNOWN value on reads,
+ * not an abort.
+ */
+
+ switch (offset) {
+ case GICD_CTLR:
+ if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
+ /* The NS view of the GICD_CTLR sees only certain bits:
+ * + bit [31] (RWP) is an alias of the Secure bit [31]
+ * + bit [4] (ARE_NS) is an alias of Secure bit [5]
+ * + bit [1] (EnableGrp1A) is an alias of Secure bit [1] if
+ * NS affinity routing is enabled, otherwise RES0
+ * + bit [0] (EnableGrp1) is an alias of Secure bit [1] if
+ * NS affinity routing is not enabled, otherwise RES0
+ * Since for QEMU affinity routing is always enabled
+ * for both S and NS this means that bits [4] and [5] are
+ * both always 1, and we can simply make the NS view
+ * be bits 31, 4 and 1 of the S view.
+ */
+ *data = s->gicd_ctlr & (GICD_CTLR_ARE_S |
+ GICD_CTLR_EN_GRP1NS |
+ GICD_CTLR_RWP);
+ } else {
+ *data = s->gicd_ctlr;
+ }
+ return true;
+ case GICD_TYPER:
+ {
+ /* For this implementation:
+ * No1N == 1 (1-of-N SPI interrupts not supported)
+ * A3V == 1 (non-zero values of Affinity level 3 supported)
+ * IDbits == 0xf (we support 16-bit interrupt identifiers)
+ * DVIS == 0 (Direct virtual LPI injection not supported)
+ * LPIS == 1 (LPIs are supported if affinity routing is enabled)
+ * num_LPIs == 0b00000 (bits [15:11],Number of LPIs as indicated
+ * by GICD_TYPER.IDbits)
+ * MBIS == 0 (message-based SPIs not supported)
+ * SecurityExtn == 1 if security extns supported
+ * CPUNumber == 0 since for us ARE is always 1
+             * ITLinesNumber == ((max SPI INTID + 1) / 32) - 1
+             */
+        int itlinesnumber = (s->num_irq / 32) - 1;
+ /*
+ * SecurityExtn must be RAZ if GICD_CTLR.DS == 1, and
+ * "security extensions not supported" always implies DS == 1,
+ * so we only need to check the DS bit.
+ */
+ bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS);
+
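+        /* In the value below, bit 25 is No1N, bit 24 is A3V, bits [23:19]
+         * are IDbits (0xf => 16-bit INTIDs), the LPIS bit is set via
+         * GICD_TYPER_LPIS_SHIFT, bit 10 is SecurityExtn and bits [4:0]
+         * are ITLinesNumber.
+         */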
+ *data = (1 << 25) | (1 << 24) | (sec_extn << 10) |
+ (s->lpi_enable << GICD_TYPER_LPIS_SHIFT) |
+ (0xf << 19) | itlinesnumber;
+ return true;
+ }
+ case GICD_IIDR:
+ /* We claim to be an ARM r0p0 with a zero ProductID.
+ * This is the same as an r0p0 GIC-500.
+ */
+ *data = gicv3_iidr();
+ return true;
+ case GICD_STATUSR:
+ /* RAZ/WI for us (this is an optional register and our implementation
+ * does not track RO/WO/reserved violations to report them to the guest)
+ */
+ *data = 0;
+ return true;
+ case GICD_IGROUPR ... GICD_IGROUPR + 0x7f:
+ {
+ int irq;
+
+ if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
+ *data = 0;
+ return true;
+ }
+ /* RAZ/WI for SGIs, PPIs, unimplemented irqs */
+ irq = (offset - GICD_IGROUPR) * 8;
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ *data = 0;
+ return true;
+ }
+ *data = *gic_bmp_ptr32(s->group, irq);
+ return true;
+ }
+ case GICD_ISENABLER ... GICD_ISENABLER + 0x7f:
+ *data = gicd_read_bitmap_reg(s, attrs, s->enabled, NULL,
+ offset - GICD_ISENABLER);
+ return true;
+ case GICD_ICENABLER ... GICD_ICENABLER + 0x7f:
+ *data = gicd_read_bitmap_reg(s, attrs, s->enabled, NULL,
+ offset - GICD_ICENABLER);
+ return true;
+ case GICD_ISPENDR ... GICD_ISPENDR + 0x7f:
+ *data = gicd_read_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge1,
+ offset - GICD_ISPENDR);
+ return true;
+ case GICD_ICPENDR ... GICD_ICPENDR + 0x7f:
+ *data = gicd_read_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge2,
+ offset - GICD_ICPENDR);
+ return true;
+ case GICD_ISACTIVER ... GICD_ISACTIVER + 0x7f:
+ *data = gicd_read_bitmap_reg(s, attrs, s->active, mask_nsacr_ge2,
+ offset - GICD_ISACTIVER);
+ return true;
+ case GICD_ICACTIVER ... GICD_ICACTIVER + 0x7f:
+ *data = gicd_read_bitmap_reg(s, attrs, s->active, mask_nsacr_ge2,
+ offset - GICD_ICACTIVER);
+ return true;
+ case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff:
+ {
+ int i, irq = offset - GICD_IPRIORITYR;
+ uint32_t value = 0;
+
+ for (i = irq + 3; i >= irq; i--) {
+ value <<= 8;
+ value |= gicd_read_ipriorityr(s, attrs, i);
+ }
+ *data = value;
+ return true;
+ }
+ case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff:
+ /* RAZ/WI since affinity routing is always enabled */
+ *data = 0;
+ return true;
+ case GICD_ICFGR ... GICD_ICFGR + 0xff:
+ {
+        /* Here only the odd bits are used; even bits are RES0 */
+ int irq = (offset - GICD_ICFGR) * 4;
+ uint32_t value = 0;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ *data = 0;
+ return true;
+ }
+
+ /* Since our edge_trigger bitmap is one bit per irq, we only need
+ * half of the 32-bit word, which we can then spread out
+ * into the odd bits.
+ */
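+        /* e.g. edge_trigger bits 0b101 for irqs n+2..n become
+         * half_shuffle32(0b101) = 0b10001, and the << 1 below then places
+         * them in the odd Int_config[1] bit positions, giving 0b100010.
+         */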
+ value = *gic_bmp_ptr32(s->edge_trigger, irq & ~0x1f);
+ value &= mask_group_and_nsacr(s, attrs, NULL, irq & ~0x1f);
+ value = extract32(value, (irq & 0x1f) ? 16 : 0, 16);
+ value = half_shuffle32(value) << 1;
+ *data = value;
+ return true;
+ }
+ case GICD_IGRPMODR ... GICD_IGRPMODR + 0xff:
+ {
+ int irq;
+
+ if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
+ /* RAZ/WI if security disabled, or if
+ * security enabled and this is an NS access
+ */
+ *data = 0;
+ return true;
+ }
+ /* RAZ/WI for SGIs, PPIs, unimplemented irqs */
+ irq = (offset - GICD_IGRPMODR) * 8;
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ *data = 0;
+ return true;
+ }
+ *data = *gic_bmp_ptr32(s->grpmod, irq);
+ return true;
+ }
+ case GICD_NSACR ... GICD_NSACR + 0xff:
+ {
+ /* Two bits per interrupt */
+ int irq = (offset - GICD_NSACR) * 4;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ *data = 0;
+ return true;
+ }
+
+ if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
+ /* RAZ/WI if security disabled, or if
+ * security enabled and this is an NS access
+ */
+ *data = 0;
+ return true;
+ }
+
+ *data = s->gicd_nsacr[irq / 16];
+ return true;
+ }
+ case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf:
+ case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
+ /* RAZ/WI since affinity routing is always enabled */
+ *data = 0;
+ return true;
+ case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
+ {
+ uint64_t r;
+ int irq = (offset - GICD_IROUTER) / 8;
+
+ r = gicd_read_irouter(s, attrs, irq);
+ if (offset & 7) {
+ *data = r >> 32;
+ } else {
+ *data = (uint32_t)r;
+ }
+ return true;
+ }
+ case GICD_IDREGS ... GICD_IDREGS + 0x2f:
+ /* ID registers */
+ *data = gicv3_idreg(offset - GICD_IDREGS);
+ return true;
+ case GICD_SGIR:
+ /* WO registers, return unknown value */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest read from WO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ *data = 0;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool gicd_writel(GICv3State *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ /* Almost all GICv3 distributor registers are 32-bit. Note that
+ * RO registers must ignore writes, not abort.
+ */
+
+ switch (offset) {
+ case GICD_CTLR:
+ {
+ uint32_t mask;
+ /* GICv3 5.3.20 */
+ if (s->gicd_ctlr & GICD_CTLR_DS) {
+ /* With only one security state, E1NWF is RAZ/WI, DS is RAO/WI,
+ * ARE is RAO/WI (affinity routing always on), and only
+ * bits 0 and 1 (group enables) are writable.
+ */
+ mask = GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1NS;
+ } else {
+ if (attrs.secure) {
+ /* for secure access:
+ * ARE_NS and ARE_S are RAO/WI (affinity routing always on)
+ * E1NWF is RAZ/WI (we don't support enable-1-of-n-wakeup)
+ *
+ * We can only modify bits[2:0] (the group enables).
+ */
+ mask = GICD_CTLR_DS | GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1_ALL;
+ } else {
+ /* For non secure access ARE_NS is RAO/WI and EnableGrp1
+ * is RES0. The only writable bit is [1] (EnableGrp1A), which
+ * is an alias of the Secure bit [1].
+ */
+ mask = GICD_CTLR_EN_GRP1NS;
+ }
+ }
+ s->gicd_ctlr = (s->gicd_ctlr & ~mask) | (value & mask);
+ if (value & mask & GICD_CTLR_DS) {
+ /* We just set DS, so the ARE_NS and EnG1S bits are now RES0.
+ * Note that this is a one-way transition because if DS is set
+ * then it's not writeable, so it can only go back to 0 with a
+ * hardware reset.
+ */
+ s->gicd_ctlr &= ~(GICD_CTLR_EN_GRP1S | GICD_CTLR_ARE_NS);
+ }
+ gicv3_full_update(s);
+ return true;
+ }
+ case GICD_STATUSR:
+ /* RAZ/WI for our implementation */
+ return true;
+ case GICD_IGROUPR ... GICD_IGROUPR + 0x7f:
+ {
+ int irq;
+
+ if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) {
+ return true;
+ }
+ /* RAZ/WI for SGIs, PPIs, unimplemented irqs */
+ irq = (offset - GICD_IGROUPR) * 8;
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return true;
+ }
+ *gic_bmp_ptr32(s->group, irq) = value;
+ gicv3_update(s, irq, 32);
+ return true;
+ }
+ case GICD_ISENABLER ... GICD_ISENABLER + 0x7f:
+ gicd_write_set_bitmap_reg(s, attrs, s->enabled, NULL,
+ offset - GICD_ISENABLER, value);
+ return true;
+ case GICD_ICENABLER ... GICD_ICENABLER + 0x7f:
+ gicd_write_clear_bitmap_reg(s, attrs, s->enabled, NULL,
+ offset - GICD_ICENABLER, value);
+ return true;
+ case GICD_ISPENDR ... GICD_ISPENDR + 0x7f:
+ gicd_write_set_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge1,
+ offset - GICD_ISPENDR, value);
+ return true;
+ case GICD_ICPENDR ... GICD_ICPENDR + 0x7f:
+ gicd_write_clear_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge2,
+ offset - GICD_ICPENDR, value);
+ return true;
+ case GICD_ISACTIVER ... GICD_ISACTIVER + 0x7f:
+ gicd_write_set_bitmap_reg(s, attrs, s->active, NULL,
+ offset - GICD_ISACTIVER, value);
+ return true;
+ case GICD_ICACTIVER ... GICD_ICACTIVER + 0x7f:
+ gicd_write_clear_bitmap_reg(s, attrs, s->active, NULL,
+ offset - GICD_ICACTIVER, value);
+ return true;
+ case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff:
+ {
+ int i, irq = offset - GICD_IPRIORITYR;
+
+ if (irq < GIC_INTERNAL || irq + 3 >= s->num_irq) {
+ return true;
+ }
+
+ for (i = irq; i < irq + 4; i++, value >>= 8) {
+ gicd_write_ipriorityr(s, attrs, i, value);
+ }
+ gicv3_update(s, irq, 4);
+ return true;
+ }
+ case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff:
+ /* RAZ/WI since affinity routing is always enabled */
+ return true;
+ case GICD_ICFGR ... GICD_ICFGR + 0xff:
+ {
+ /* Here only the odd bits are used; even bits are RES0 */
+ int irq = (offset - GICD_ICFGR) * 4;
+ uint32_t mask, oldval;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return true;
+ }
+
+ /* Since our edge_trigger bitmap is one bit per irq, our input
+ * 32-bits will compress down into 16 bits which we need
+ * to write into the bitmap.
+ */
+ value = half_unshuffle32(value >> 1);
+ mask = mask_group_and_nsacr(s, attrs, NULL, irq & ~0x1f);
+ if (irq & 0x1f) {
+ value <<= 16;
+ mask &= 0xffff0000U;
+ } else {
+ mask &= 0xffff;
+ }
+ oldval = *gic_bmp_ptr32(s->edge_trigger, (irq & ~0x1f));
+ value = (oldval & ~mask) | (value & mask);
+ *gic_bmp_ptr32(s->edge_trigger, irq & ~0x1f) = value;
+ return true;
+ }
+ case GICD_IGRPMODR ... GICD_IGRPMODR + 0xff:
+ {
+ int irq;
+
+ if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
+ /* RAZ/WI if security disabled, or if
+ * security enabled and this is an NS access
+ */
+ return true;
+ }
+ /* RAZ/WI for SGIs, PPIs, unimplemented irqs */
+ irq = (offset - GICD_IGRPMODR) * 8;
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return true;
+ }
+ *gic_bmp_ptr32(s->grpmod, irq) = value;
+ gicv3_update(s, irq, 32);
+ return true;
+ }
+ case GICD_NSACR ... GICD_NSACR + 0xff:
+ {
+ /* Two bits per interrupt */
+ int irq = (offset - GICD_NSACR) * 4;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return true;
+ }
+
+ if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
+ /* RAZ/WI if security disabled, or if
+ * security enabled and this is an NS access
+ */
+ return true;
+ }
+
+ s->gicd_nsacr[irq / 16] = value;
+ /* No update required as this only affects access permission checks */
+ return true;
+ }
+ case GICD_SGIR:
+ /* RES0 if affinity routing is enabled */
+ return true;
+ case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf:
+ case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
+ /* RAZ/WI since affinity routing is always enabled */
+ return true;
+ case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
+ {
+ uint64_t r;
+ int irq = (offset - GICD_IROUTER) / 8;
+
+ if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+ return true;
+ }
+
+ /* Write half of the 64-bit register */
+ r = gicd_read_irouter(s, attrs, irq);
+ r = deposit64(r, (offset & 7) ? 32 : 0, 32, value);
+ gicd_write_irouter(s, attrs, irq, r);
+ return true;
+ }
+ case GICD_IDREGS ... GICD_IDREGS + 0x2f:
+ case GICD_TYPER:
+ case GICD_IIDR:
+ /* RO registers, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool gicd_writeq(GICv3State *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ /* Our only 64-bit registers are GICD_IROUTER<n> */
+ int irq;
+
+ switch (offset) {
+ case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
+ irq = (offset - GICD_IROUTER) / 8;
+ gicd_write_irouter(s, attrs, irq, value);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool gicd_readq(GICv3State *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ /* Our only 64-bit registers are GICD_IROUTER<n> */
+ int irq;
+
+ switch (offset) {
+ case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
+ irq = (offset - GICD_IROUTER) / 8;
+ *data = gicd_read_irouter(s, attrs, irq);
+ return true;
+ default:
+ return false;
+ }
+}
+
+MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICv3State *s = (GICv3State *)opaque;
+ bool r;
+
+ switch (size) {
+ case 1:
+ r = gicd_readb(s, offset, data, attrs);
+ break;
+ case 2:
+ r = gicd_readw(s, offset, data, attrs);
+ break;
+ case 4:
+ r = gicd_readl(s, offset, data, attrs);
+ break;
+ case 8:
+ r = gicd_readq(s, offset, data, attrs);
+ break;
+ default:
+ r = false;
+ break;
+ }
+
+ if (!r) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest read at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+ trace_gicv3_dist_badread(offset, size, attrs.secure);
+ /* The spec requires that reserved registers are RAZ/WI;
+         * so use the false returns from the leaf functions as a way to
+         * trigger the guest-error logging, but don't report a bus error
+         * to the caller, or we'd cause a spurious guest data abort.
+ */
+ *data = 0;
+ } else {
+ trace_gicv3_dist_read(offset, *data, size, attrs.secure);
+ }
+ return MEMTX_OK;
+}
+
+MemTxResult gicv3_dist_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICv3State *s = (GICv3State *)opaque;
+ bool r;
+
+ switch (size) {
+ case 1:
+ r = gicd_writeb(s, offset, data, attrs);
+ break;
+ case 2:
+ r = gicd_writew(s, offset, data, attrs);
+ break;
+ case 4:
+ r = gicd_writel(s, offset, data, attrs);
+ break;
+ case 8:
+ r = gicd_writeq(s, offset, data, attrs);
+ break;
+ default:
+ r = false;
+ break;
+ }
+
+ if (!r) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+ trace_gicv3_dist_badwrite(offset, data, size, attrs.secure);
+ /* The spec requires that reserved registers are RAZ/WI;
+         * so use the false returns from the leaf functions as a way to
+         * trigger the guest-error logging, but don't report a bus error
+         * to the caller, or we'd cause a spurious guest data abort.
+ */
+ } else {
+ trace_gicv3_dist_write(offset, data, size, attrs.secure);
+ }
+ return MEMTX_OK;
+}
+
+void gicv3_dist_set_irq(GICv3State *s, int irq, int level)
+{
+ /* Update distributor state for a change in an external SPI input line */
+ if (level == gicv3_gicd_level_test(s, irq)) {
+ return;
+ }
+
+ trace_gicv3_dist_set_irq(irq, level);
+
+ gicv3_gicd_level_replace(s, irq, level);
+
+ if (level) {
+ /* 0->1 edges latch the pending bit for edge-triggered interrupts */
+ if (gicv3_gicd_edge_trigger_test(s, irq)) {
+ gicv3_gicd_pending_set(s, irq);
+ }
+ }
+
+ gicv3_update(s, irq, 1);
+}
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
new file mode 100644
index 000000000..c929a9cb5
--- /dev/null
+++ b/hw/intc/arm_gicv3_its.c
@@ -0,0 +1,1323 @@
+/*
+ * ITS emulation for a GICv3-based system
+ *
+ * Copyright Linaro.org 2021
+ *
+ * Authors:
+ * Shashi Mallela <shashi.mallela@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/arm_gicv3_its_common.h"
+#include "gicv3_internal.h"
+#include "qom/object.h"
+#include "qapi/error.h"
+
+typedef struct GICv3ITSClass GICv3ITSClass;
+/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
+DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
+ ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
+
+struct GICv3ITSClass {
+ GICv3ITSCommonClass parent_class;
+ void (*parent_reset)(DeviceState *dev);
+};
+
+/*
+ * This is an internal enum used to distinguish between LPIs triggered
+ * via the command queue and LPIs triggered via a GITS_TRANSLATER write.
+ */
+typedef enum ItsCmdType {
+ NONE = 0, /* internal indication for GITS_TRANSLATER write */
+ CLEAR = 1,
+ DISCARD = 2,
+ INTERRUPT = 3,
+} ItsCmdType;
+
+typedef struct {
+ uint32_t iteh;
+ uint64_t itel;
+} IteEntry;
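+/*
+ * The in-memory interrupt translation entry format is IMPDEF; in this
+ * implementation the 64-bit itel word holds the valid bit, the interrupt
+ * type, the INTID and the spurious-INTID fields, and the 32-bit iteh
+ * word holds the collection ID (see update_ite() and get_ite() below).
+ */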
+
+static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
+{
+ uint64_t result = 0;
+
+ switch (page_sz) {
+ case GITS_PAGE_SIZE_4K:
+ case GITS_PAGE_SIZE_16K:
+ result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
+ break;
+
+ case GITS_PAGE_SIZE_64K:
+ result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
+ result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
+ break;
+
+ default:
+ break;
+ }
+ return result;
+}
+
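+/*
+ * Look up the collection table entry for @icid. With an indirect
+ * (two-level) table we first read the level 1 entry covering this ICID,
+ * check its valid bit and follow the level 2 pointer held in its bits
+ * [50:0]; with a flat table the CTE is indexed directly. Returns true
+ * if the entry's own valid bit is set.
+ */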
+static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
+ MemTxResult *res)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t l2t_addr;
+ uint64_t value;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+
+ if (s->ct.indirect) {
+ l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->ct.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+
+ if (*res == MEMTX_OK) {
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+
+ l2t_addr = value & ((1ULL << 51) - 1);
+
+ *cte = address_space_ldq_le(as, l2t_addr +
+ ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ }
+ }
+ } else {
+ /* Flat level table */
+ *cte = address_space_ldq_le(as, s->ct.base_addr +
+ (icid * GITS_CTE_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ }
+
+ return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
+}
+
+static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
+ IteEntry ite)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t itt_addr;
+ MemTxResult res = MEMTX_OK;
+
+ itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
+ itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
+
+ address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
+ sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
+ &res);
+
+ if (res == MEMTX_OK) {
+ address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
+ sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ if (res != MEMTX_OK) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
+ uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t itt_addr;
+ bool status = false;
+ IteEntry ite = {};
+
+ itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
+ itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
+
+ ite.itel = address_space_ldq_le(as, itt_addr +
+ (eventid * (sizeof(uint64_t) +
+ sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
+ res);
+
+ if (*res == MEMTX_OK) {
+ ite.iteh = address_space_ldl_le(as, itt_addr +
+ (eventid * (sizeof(uint64_t) +
+ sizeof(uint32_t))) + sizeof(uint32_t),
+ MEMTXATTRS_UNSPECIFIED, res);
+
+ if (*res == MEMTX_OK) {
+ if (ite.itel & TABLE_ENTRY_VALID_MASK) {
+ if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) &
+ GITS_TYPE_PHYSICAL) {
+ *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >>
+ ITE_ENTRY_INTID_SHIFT;
+ *icid = ite.iteh & ITE_ENTRY_ICID_MASK;
+ status = true;
+ }
+ }
+ }
+ }
+ return status;
+}
+
+static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t l2t_addr;
+ uint64_t value;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+
+ if (s->dt.indirect) {
+ l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->dt.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+
+ if (*res == MEMTX_OK) {
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+
+ l2t_addr = value & ((1ULL << 51) - 1);
+
+ value = address_space_ldq_le(as, l2t_addr +
+ ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ }
+ }
+ } else {
+ /* Flat level table */
+ value = address_space_ldq_le(as, s->dt.base_addr +
+ (devid * GITS_DTE_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ }
+
+ return value;
+}
+
+/*
+ * This function handles the processing of the following commands, based
+ * on the ItsCmdType parameter passed in:
+ * 1. triggering of LPI interrupt translation via the ITS INT command
+ * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER write
+ * 3. handling of the ITS CLEAR command
+ * 4. handling of the ITS DISCARD command
+ */
+static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
+ ItsCmdType cmd)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint32_t devid, eventid;
+ MemTxResult res = MEMTX_OK;
+ bool dte_valid;
+ uint64_t dte = 0;
+ uint32_t max_eventid;
+ uint16_t icid = 0;
+ uint32_t pIntid = 0;
+ bool ite_valid = false;
+ uint64_t cte = 0;
+ bool cte_valid = false;
+ bool result = false;
+ uint64_t rdbase;
+
+ if (cmd == NONE) {
+ devid = offset;
+ } else {
+ devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ }
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ eventid = (value & EVENTID_MASK);
+
+ dte = get_dte(s, devid, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+ dte_valid = dte & TABLE_ENTRY_VALID_MASK;
+
+ if (dte_valid) {
+ max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
+
+ ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ if (ite_valid) {
+ cte_valid = get_cte(s, icid, &cte, &res);
+ }
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+ }
+
+ if ((devid > s->dt.maxids.max_devids) || !dte_valid || !ite_valid ||
+ !cte_valid || (eventid > max_eventid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes "
+                      "devid %d or eventid %d or invalid dte %d or "
+ "invalid cte %d or invalid ite %d\n",
+ __func__, devid, eventid, dte_valid, cte_valid,
+ ite_valid);
+ /*
+         * In this implementation, in case of error
+         * we ignore this command and move on to the next
+         * command in the queue.
+ */
+ } else {
+ /*
+         * The current implementation only supports rdbase == procnum,
+         * so the rdbase physical address is ignored.
+ */
+ rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U;
+
+        if (rdbase >= s->gicv3->num_cpu) {
+ return result;
+ }
+
+ if ((cmd == CLEAR) || (cmd == DISCARD)) {
+ gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
+ } else {
+ gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
+ }
+
+ if (cmd == DISCARD) {
+ IteEntry ite = {};
+ /* remove mapping from interrupt translation table */
+ result = update_ite(s, eventid, dte, ite);
+ }
+ }
+
+ return result;
+}
+
+static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
+ bool ignore_pInt)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint32_t devid, eventid;
+ uint32_t pIntid = 0;
+ uint32_t max_eventid, max_Intid;
+ bool dte_valid;
+ MemTxResult res = MEMTX_OK;
+ uint16_t icid = 0;
+ uint64_t dte = 0;
+ IteEntry ite;
+ uint32_t int_spurious = INTID_SPURIOUS;
+ bool result = false;
+
+ devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ eventid = (value & EVENTID_MASK);
+
+ if (!ignore_pInt) {
+ pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
+ }
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ icid = value & ICID_MASK;
+
+ dte = get_dte(s, devid, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+ dte_valid = dte & TABLE_ENTRY_VALID_MASK;
+
+ max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
+
+ if (!ignore_pInt) {
+ max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
+ }
+
+ if ((devid > s->dt.maxids.max_devids) || (icid > s->ct.maxids.max_collids)
+ || !dte_valid || (eventid > max_eventid) ||
+ (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
+ (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes "
+                      "devid %d or icid %d or eventid %d or pIntid %d or "
+ "unmapped dte %d\n", __func__, devid, icid, eventid,
+ pIntid, dte_valid);
+ /*
+         * In this implementation, in case of error
+         * we ignore this command and move on to the next
+         * command in the queue.
+ */
+ } else {
+ /* add ite entry to interrupt translation table */
+ ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) |
+ (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT);
+
+ if (ignore_pInt) {
+ ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT);
+ } else {
+ ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT);
+ }
+ ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT);
+ ite.iteh = icid;
+
+ result = update_ite(s, eventid, dte, ite);
+ }
+
+ return result;
+}
+
+static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
+ uint64_t rdbase)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t value;
+ uint64_t l2t_addr;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+ uint64_t cte = 0;
+ MemTxResult res = MEMTX_OK;
+
+ if (!s->ct.valid) {
+ return true;
+ }
+
+ if (valid) {
+ /* add mapping entry to collection table */
+ cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
+ }
+
+ /*
+ * The specification defines the format of level 1 entries of a
+ * 2-level table, but the format of level 2 entries and the format
+ * of flat-mapped tables is IMPDEF.
+ */
+ if (s->ct.indirect) {
+ l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->ct.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return false;
+ }
+
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+
+ l2t_addr = value & ((1ULL << 51) - 1);
+
+ address_space_stq_le(as, l2t_addr +
+ ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ cte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ } else {
+ /* Flat level table */
+ address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
+ cte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ if (res != MEMTX_OK) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool process_mapc(GICv3ITSState *s, uint32_t offset)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint16_t icid;
+ uint64_t rdbase;
+ bool valid;
+ MemTxResult res = MEMTX_OK;
+ bool result = false;
+ uint64_t value;
+
+ offset += NUM_BYTES_IN_DW;
+ offset += NUM_BYTES_IN_DW;
+
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ icid = value & ICID_MASK;
+
+ rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
+ rdbase &= RDBASE_PROCNUM_MASK;
+
+ valid = (value & CMD_FIELD_VALID_MASK);
+
+    if ((icid > s->ct.maxids.max_collids) || (rdbase >= s->gicv3->num_cpu)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPC: invalid collection table attributes "
+ "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
+ /*
+         * In this implementation, in case of error
+         * we ignore this command and move on to the next
+         * command in the queue.
+ */
+ } else {
+ result = update_cte(s, icid, valid, rdbase);
+ }
+
+ return result;
+}
+
+static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
+ uint8_t size, uint64_t itt_addr)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t value;
+ uint64_t l2t_addr;
+ bool valid_l2t;
+ uint32_t l2t_id;
+ uint32_t max_l2_entries;
+ uint64_t dte = 0;
+ MemTxResult res = MEMTX_OK;
+
+ if (s->dt.valid) {
+ if (valid) {
+ /* add mapping entry to device table */
+ dte = (valid & TABLE_ENTRY_VALID_MASK) |
+ ((size & SIZE_MASK) << 1U) |
+ (itt_addr << GITS_DTE_ITTADDR_SHIFT);
+ }
+ } else {
+ return true;
+ }
+
+ /*
+ * The specification defines the format of level 1 entries of a
+ * 2-level table, but the format of level 2 entries and the format
+ * of flat-mapped tables is IMPDEF.
+ */
+ if (s->dt.indirect) {
+ l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
+
+ value = address_space_ldq_le(as,
+ s->dt.base_addr +
+ (l2t_id * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return false;
+ }
+
+ valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+
+ if (valid_l2t) {
+ max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+
+ l2t_addr = value & ((1ULL << 51) - 1);
+
+ address_space_stq_le(as, l2t_addr +
+ ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ dte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ } else {
+ /* Flat level table */
+ address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
+ dte, MEMTXATTRS_UNSPECIFIED, &res);
+ }
+ if (res != MEMTX_OK) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint32_t devid;
+ uint8_t size;
+ uint64_t itt_addr;
+ bool valid;
+ MemTxResult res = MEMTX_OK;
+ bool result = false;
+
+ devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ size = (value & SIZE_MASK);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+
+ if (res != MEMTX_OK) {
+ return result;
+ }
+
+ itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
+
+ valid = (value & CMD_FIELD_VALID_MASK);
+
+ if ((devid > s->dt.maxids.max_devids) ||
+ (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPD: invalid device table attributes "
+ "devid %d or size %d\n", devid, size);
+ /*
+         * In this implementation, in case of error
+         * we ignore this command and move on to the next
+         * command in the queue.
+ */
+ } else {
+ result = update_dte(s, devid, valid, size, itt_addr);
+ }
+
+ return result;
+}
+
+/*
+ * The current implementation blocks until all
+ * commands in the queue have been processed.
+ */
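+/*
+ * Each call walks the ring from the GITS_CREADR offset up to the
+ * GITS_CWRITER offset, one GITS_CMDQ_ENTRY_SIZE slot per command,
+ * wrapping the read pointer at s->cq.max_entries; on a DMA error the
+ * STALLED flag is set in GITS_CREADR and processing stops.
+ */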
+static void process_cmdq(GICv3ITSState *s)
+{
+ uint32_t wr_offset = 0;
+ uint32_t rd_offset = 0;
+ uint32_t cq_offset = 0;
+ uint64_t data;
+ AddressSpace *as = &s->gicv3->dma_as;
+ MemTxResult res = MEMTX_OK;
+ bool result = true;
+ uint8_t cmd;
+ int i;
+
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ return;
+ }
+
+ wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
+
+ if (wr_offset > s->cq.max_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid write offset "
+ "%d\n", __func__, wr_offset);
+ return;
+ }
+
+ rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
+
+ if (rd_offset > s->cq.max_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid read offset "
+ "%d\n", __func__, rd_offset);
+ return;
+ }
+
+ while (wr_offset != rd_offset) {
+ cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
+ data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ result = false;
+ }
+ cmd = (data & CMD_MASK);
+
+ switch (cmd) {
+ case GITS_CMD_INT:
+            result = process_its_cmd(s, data, cq_offset, INTERRUPT);
+ break;
+ case GITS_CMD_CLEAR:
+            result = process_its_cmd(s, data, cq_offset, CLEAR);
+ break;
+ case GITS_CMD_SYNC:
+ /*
+             * The current implementation makes a blocking synchronous
+             * call for every command issued earlier, so the internal
+             * state is already consistent by the time a SYNC command is
+             * executed; no further processing is required for SYNC.
+ */
+ break;
+ case GITS_CMD_MAPD:
+ result = process_mapd(s, data, cq_offset);
+ break;
+ case GITS_CMD_MAPC:
+ result = process_mapc(s, cq_offset);
+ break;
+ case GITS_CMD_MAPTI:
+ result = process_mapti(s, data, cq_offset, false);
+ break;
+ case GITS_CMD_MAPI:
+ result = process_mapti(s, data, cq_offset, true);
+ break;
+ case GITS_CMD_DISCARD:
+ result = process_its_cmd(s, data, cq_offset, DISCARD);
+ break;
+ case GITS_CMD_INV:
+ case GITS_CMD_INVALL:
+ /*
+             * The current implementation doesn't cache any ITS tables,
+             * only the calculated LPI priority information; we just need
+             * to trigger an LPI priority recalculation to stay in sync
+             * with LPI config table or pending table changes.
+ */
+ for (i = 0; i < s->gicv3->num_cpu; i++) {
+ gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
+ }
+ break;
+ default:
+ break;
+ }
+ if (result) {
+ rd_offset++;
+ rd_offset %= s->cq.max_entries;
+ s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
+ } else {
+ /*
+             * In this implementation, in case of a DMA read/write error
+             * we stall the command processing.
+ */
+ s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: %x cmd processing failed\n", __func__, cmd);
+ break;
+ }
+ }
+}
+
+/*
+ * This function extracts the ITS Device and Collection table specific
+ * parameters (such as base_addr and size) from the GITS_BASER<n> registers.
+ * It is called when the ITS is enabled and also during post_load migration.
+ */
+static void extract_table_params(GICv3ITSState *s)
+{
+ uint16_t num_pages = 0;
+ uint8_t page_sz_type;
+ uint8_t type;
+ uint32_t page_sz = 0;
+ uint64_t value;
+
+ for (int i = 0; i < 8; i++) {
+ value = s->baser[i];
+
+ if (!value) {
+ continue;
+ }
+
+ page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
+
+ switch (page_sz_type) {
+ case 0:
+ page_sz = GITS_PAGE_SIZE_4K;
+ break;
+
+ case 1:
+ page_sz = GITS_PAGE_SIZE_16K;
+ break;
+
+ case 2:
+ case 3:
+ page_sz = GITS_PAGE_SIZE_64K;
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
+
+ type = FIELD_EX64(value, GITS_BASER, TYPE);
+
+ switch (type) {
+
+ case GITS_BASER_TYPE_DEVICE:
+            memset(&s->dt, 0, sizeof(s->dt));
+ s->dt.valid = FIELD_EX64(value, GITS_BASER, VALID);
+
+ if (!s->dt.valid) {
+ return;
+ }
+
+ s->dt.page_sz = page_sz;
+ s->dt.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
+ s->dt.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
+
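+            /*
+             * For a flat table the capacity is simply the table size
+             * divided by the entry size. For a two-level (indirect)
+             * table, each L1 entry points to one page of entries, so
+             * the capacity is the number of L1 entries multiplied by
+             * the number of entries per page.
+             */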
+ if (!s->dt.indirect) {
+ s->dt.max_entries = (num_pages * page_sz) / s->dt.entry_sz;
+ } else {
+ s->dt.max_entries = (((num_pages * page_sz) /
+ L1TABLE_ENTRY_SIZE) *
+ (page_sz / s->dt.entry_sz));
+ }
+
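+            /*
+             * GITS_TYPER.Devbits holds (number of DeviceID bits - 1),
+             * so the number of valid DeviceIDs is 2^(Devbits + 1).
+             */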
+ s->dt.maxids.max_devids = (1UL << (FIELD_EX64(s->typer, GITS_TYPER,
+ DEVBITS) + 1));
+
+ s->dt.base_addr = baser_base_addr(value, page_sz);
+
+ break;
+
+ case GITS_BASER_TYPE_COLLECTION:
+            memset(&s->ct, 0, sizeof(s->ct));
+ s->ct.valid = FIELD_EX64(value, GITS_BASER, VALID);
+
+ /*
+             * GITS_TYPER.HCC is 0 for this implementation,
+             * hence writes are discarded if ct.valid is 0.
+ */
+ if (!s->ct.valid) {
+ return;
+ }
+
+ s->ct.page_sz = page_sz;
+ s->ct.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
+ s->ct.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
+
+ if (!s->ct.indirect) {
+ s->ct.max_entries = (num_pages * page_sz) / s->ct.entry_sz;
+ } else {
+ s->ct.max_entries = (((num_pages * page_sz) /
+ L1TABLE_ENTRY_SIZE) *
+ (page_sz / s->ct.entry_sz));
+ }
+
+ if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
+ s->ct.maxids.max_collids = (1UL << (FIELD_EX64(s->typer,
+ GITS_TYPER, CIDBITS) + 1));
+ } else {
+ /* 16-bit CollectionId supported when CIL == 0 */
+ s->ct.maxids.max_collids = (1UL << 16);
+ }
+
+ s->ct.base_addr = baser_base_addr(value, page_sz);
+
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static void extract_cmdq_params(GICv3ITSState *s)
+{
+ uint16_t num_pages = 0;
+ uint64_t value = s->cbaser;
+
+ num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
+
+    memset(&s->cq, 0, sizeof(s->cq));
+ s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
+
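+    /*
+     * GITS_CBASER.Size is the number of 4KB pages (minus one) backing
+     * the command queue; each command occupies a 32-byte slot.
+     */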
+ if (s->cq.valid) {
+ s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
+ GITS_CMDQ_ENTRY_SIZE;
+ s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
+ s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
+ }
+}
+
+static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
+ uint64_t data, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ bool result = true;
+ uint32_t devid = 0;
+
+ switch (offset) {
+ case GITS_TRANSLATER:
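+        /*
+         * A write to GITS_TRANSLATER carries the EventID in the data;
+         * the DeviceID is the bus master's requester ID. We reuse
+         * process_its_cmd() with cmd == NONE to perform the table
+         * lookups and deliver the corresponding LPI.
+         */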
+ if (s->ctlr & ITS_CTLR_ENABLED) {
+ devid = attrs.requester_id;
+ result = process_its_cmd(s, data, devid, NONE);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (result) {
+ return MEMTX_OK;
+ } else {
+ return MEMTX_ERROR;
+ }
+}
+
+static bool its_writel(GICv3ITSState *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ bool result = true;
+ int index;
+
+ switch (offset) {
+ case GITS_CTLR:
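+        /*
+         * Enabling the ITS (re-)extracts the table and command queue
+         * parameters and processes any commands the guest has already
+         * queued; disabling it simply clears the enable bit.
+         */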
+ if (value & R_GITS_CTLR_ENABLED_MASK) {
+ s->ctlr |= ITS_CTLR_ENABLED;
+ extract_table_params(s);
+ extract_cmdq_params(s);
+ s->creadr = 0;
+ process_cmdq(s);
+ } else {
+ s->ctlr &= ~ITS_CTLR_ENABLED;
+ }
+ break;
+ case GITS_CBASER:
+ /*
+         * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
+         * is already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ s->cbaser = deposit64(s->cbaser, 0, 32, value);
+ s->creadr = 0;
+ s->cwriter = s->creadr;
+ }
+ break;
+ case GITS_CBASER + 4:
+ /*
+         * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
+         * is already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ s->cbaser = deposit64(s->cbaser, 32, 32, value);
+ s->creadr = 0;
+ s->cwriter = s->creadr;
+ }
+ break;
+ case GITS_CWRITER:
+ s->cwriter = deposit64(s->cwriter, 0, 32,
+ (value & ~R_GITS_CWRITER_RETRY_MASK));
+ if (s->cwriter != s->creadr) {
+ process_cmdq(s);
+ }
+ break;
+ case GITS_CWRITER + 4:
+ s->cwriter = deposit64(s->cwriter, 32, 32, value);
+ break;
+ case GITS_CREADR:
+ if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
+ s->creadr = deposit64(s->creadr, 0, 32,
+ (value & ~R_GITS_CREADR_STALLED_MASK));
+ } else {
+ /* RO register, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ }
+ break;
+ case GITS_CREADR + 4:
+ if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
+ s->creadr = deposit64(s->creadr, 32, 32, value);
+ } else {
+ /* RO register, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ }
+ break;
+ case GITS_BASER ... GITS_BASER + 0x3f:
+ /*
+         * IMPDEF choice: the GITS_BASERn registers become RO if the ITS
+         * is already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ index = (offset - GITS_BASER) / 8;
+
+ if (offset & 7) {
+ value <<= 32;
+ value &= ~GITS_BASER_RO_MASK;
+ s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
+ s->baser[index] |= value;
+ } else {
+ value &= ~GITS_BASER_RO_MASK;
+ s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
+ s->baser[index] |= value;
+ }
+ }
+ break;
+ case GITS_IIDR:
+ case GITS_IDREGS ... GITS_IDREGS + 0x2f:
+ /* RO registers, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+static bool its_readl(GICv3ITSState *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ bool result = true;
+ int index;
+
+ switch (offset) {
+ case GITS_CTLR:
+ *data = s->ctlr;
+ break;
+ case GITS_IIDR:
+ *data = gicv3_iidr();
+ break;
+ case GITS_IDREGS ... GITS_IDREGS + 0x2f:
+ /* ID registers */
+ *data = gicv3_idreg(offset - GITS_IDREGS);
+ break;
+ case GITS_TYPER:
+ *data = extract64(s->typer, 0, 32);
+ break;
+ case GITS_TYPER + 4:
+ *data = extract64(s->typer, 32, 32);
+ break;
+ case GITS_CBASER:
+ *data = extract64(s->cbaser, 0, 32);
+ break;
+ case GITS_CBASER + 4:
+ *data = extract64(s->cbaser, 32, 32);
+ break;
+ case GITS_CREADR:
+ *data = extract64(s->creadr, 0, 32);
+ break;
+ case GITS_CREADR + 4:
+ *data = extract64(s->creadr, 32, 32);
+ break;
+ case GITS_CWRITER:
+ *data = extract64(s->cwriter, 0, 32);
+ break;
+ case GITS_CWRITER + 4:
+ *data = extract64(s->cwriter, 32, 32);
+ break;
+ case GITS_BASER ... GITS_BASER + 0x3f:
+ index = (offset - GITS_BASER) / 8;
+ if (offset & 7) {
+ *data = extract64(s->baser[index], 32, 32);
+ } else {
+ *data = extract64(s->baser[index], 0, 32);
+ }
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+static bool its_writell(GICv3ITSState *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ bool result = true;
+ int index;
+
+ switch (offset) {
+ case GITS_BASER ... GITS_BASER + 0x3f:
+ /*
+         * IMPDEF choice: the GITS_BASERn registers become RO if the ITS
+         * is already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ index = (offset - GITS_BASER) / 8;
+ s->baser[index] &= GITS_BASER_RO_MASK;
+ s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
+ }
+ break;
+ case GITS_CBASER:
+ /*
+         * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
+         * is already enabled
+ */
+ if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ s->cbaser = value;
+ s->creadr = 0;
+ s->cwriter = s->creadr;
+ }
+ break;
+ case GITS_CWRITER:
+ s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
+ if (s->cwriter != s->creadr) {
+ process_cmdq(s);
+ }
+ break;
+ case GITS_CREADR:
+ if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
+ s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
+ } else {
+ /* RO register, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ }
+ break;
+ case GITS_TYPER:
+ /* RO registers, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+static bool its_readll(GICv3ITSState *s, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ bool result = true;
+ int index;
+
+ switch (offset) {
+ case GITS_TYPER:
+ *data = s->typer;
+ break;
+ case GITS_BASER ... GITS_BASER + 0x3f:
+ index = (offset - GITS_BASER) / 8;
+ *data = s->baser[index];
+ break;
+ case GITS_CBASER:
+ *data = s->cbaser;
+ break;
+ case GITS_CREADR:
+ *data = s->creadr;
+ break;
+ case GITS_CWRITER:
+ *data = s->cwriter;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ bool result;
+
+ switch (size) {
+ case 4:
+ result = its_readl(s, offset, data, attrs);
+ break;
+ case 8:
+ result = its_readll(s, offset, data, attrs);
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ if (!result) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest read at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+ /*
+ * The spec requires that reserved registers are RAZ/WI;
+ * so use false returns from leaf functions as a way to
+ * trigger the guest-error logging but don't return it to
+ * the caller, or we'll cause a spurious guest data abort.
+ */
+ *data = 0;
+ }
+ return MEMTX_OK;
+}
+
+static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ bool result;
+
+ switch (size) {
+ case 4:
+ result = its_writel(s, offset, data, attrs);
+ break;
+ case 8:
+ result = its_writell(s, offset, data, attrs);
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ if (!result) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+ /*
+ * The spec requires that reserved registers are RAZ/WI;
+ * so use false returns from leaf functions as a way to
+ * trigger the guest-error logging but don't return it to
+ * the caller, or we'll cause a spurious guest data abort.
+ */
+ }
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps gicv3_its_control_ops = {
+ .read_with_attrs = gicv3_its_read,
+ .write_with_attrs = gicv3_its_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const MemoryRegionOps gicv3_its_translation_ops = {
+ .write_with_attrs = gicv3_its_translation_write,
+ .valid.min_access_size = 2,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 2,
+ .impl.max_access_size = 4,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+ int i;
+
+ for (i = 0; i < s->gicv3->num_cpu; i++) {
+ if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
+ error_setg(errp, "Physical LPI not supported by CPU %d", i);
+ return;
+ }
+ }
+
+ gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
+
+ address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
+ "gicv3-its-sysmem");
+
+ /* set the ITS default features supported */
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL,
+ GITS_TYPE_PHYSICAL);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
+ ITS_ITT_ENTRY_SIZE - 1);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
+}
+
+static void gicv3_its_reset(DeviceState *dev)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+ GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
+
+ c->parent_reset(dev);
+
+ /* Quiescent bit reset to 1 */
+ s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
+
+ /*
+ * setting GITS_BASER0.Type = 0b001 (Device)
+ * GITS_BASER1.Type = 0b100 (Collection Table)
+     *         GITS_BASER<n>.Type, where n = 2 to 7, is 0b00 (Unimplemented)
+ * GITS_BASER<0,1>.Page_Size = 64KB
+ * and default translation table entry size to 16 bytes
+ */
+ s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
+ GITS_BASER_TYPE_DEVICE);
+ s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
+ GITS_BASER_PAGESIZE_64K);
+ s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
+ GITS_DTE_SIZE - 1);
+
+ s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
+ GITS_BASER_TYPE_COLLECTION);
+ s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
+ GITS_BASER_PAGESIZE_64K);
+ s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
+ GITS_CTE_SIZE - 1);
+}
+
+static void gicv3_its_post_load(GICv3ITSState *s)
+{
+ if (s->ctlr & ITS_CTLR_ENABLED) {
+ extract_table_params(s);
+ extract_cmdq_params(s);
+ }
+}
+
+static Property gicv3_its_props[] = {
+ DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
+ GICv3State *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void gicv3_its_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
+ GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
+
+ dc->realize = gicv3_arm_its_realize;
+ device_class_set_props(dc, gicv3_its_props);
+ device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
+ icc->post_load = gicv3_its_post_load;
+}
+
+static const TypeInfo gicv3_its_info = {
+ .name = TYPE_ARM_GICV3_ITS,
+ .parent = TYPE_ARM_GICV3_ITS_COMMON,
+ .instance_size = sizeof(GICv3ITSState),
+ .class_init = gicv3_its_class_init,
+ .class_size = sizeof(GICv3ITSClass),
+};
+
+static void gicv3_its_register_types(void)
+{
+ type_register_static(&gicv3_its_info);
+}
+
+type_init(gicv3_its_register_types)
diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c
new file mode 100644
index 000000000..90b85f1e2
--- /dev/null
+++ b/hw/intc/arm_gicv3_its_common.c
@@ -0,0 +1,159 @@
+/*
+ * ITS base class for a GICv3-based system
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Written by Pavel Fedin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/msi.h"
+#include "migration/vmstate.h"
+#include "hw/intc/arm_gicv3_its_common.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+static int gicv3_its_pre_save(void *opaque)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s);
+
+ if (c->pre_save) {
+ c->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int gicv3_its_post_load(void *opaque, int version_id)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s);
+
+ if (c->post_load) {
+ c->post_load(s);
+ }
+ return 0;
+}
+
+static const VMStateDescription vmstate_its = {
+ .name = "arm_gicv3_its",
+ .pre_save = gicv3_its_pre_save,
+ .post_load = gicv3_its_post_load,
+ .priority = MIG_PRI_GICV3_ITS,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(ctlr, GICv3ITSState),
+ VMSTATE_UINT32(iidr, GICv3ITSState),
+ VMSTATE_UINT64(cbaser, GICv3ITSState),
+ VMSTATE_UINT64(cwriter, GICv3ITSState),
+ VMSTATE_UINT64(creadr, GICv3ITSState),
+ VMSTATE_UINT64_ARRAY(baser, GICv3ITSState, 8),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static MemTxResult gicv3_its_trans_read(void *opaque, hwaddr offset,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "ITS read at offset 0x%"PRIx64"\n", offset);
+ *data = 0;
+ return MEMTX_OK;
+}
+
+static MemTxResult gicv3_its_trans_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
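+    /*
+     * 0x0040 is the offset of GITS_TRANSLATER within the translation
+     * register page; 16-bit and 32-bit writes to it are forwarded to
+     * the implementation-specific send_msi hook.
+     */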
+ if (offset == 0x0040 && ((size == 2) || (size == 4))) {
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(opaque);
+ GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s);
+ int ret = c->send_msi(s, le64_to_cpu(value), attrs.requester_id);
+
+ if (ret <= 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS: Error sending MSI: %s\n", strerror(-ret));
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS write at bad offset 0x%"PRIx64"\n", offset);
+ }
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps gicv3_its_trans_ops = {
+ .read_with_attrs = gicv3_its_trans_read,
+ .write_with_attrs = gicv3_its_trans_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops,
+ const MemoryRegionOps *tops)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(s);
+
+ memory_region_init_io(&s->iomem_its_cntrl, OBJECT(s), ops, s,
+ "control", ITS_CONTROL_SIZE);
+ memory_region_init_io(&s->iomem_its_translation, OBJECT(s),
+ tops ? tops : &gicv3_its_trans_ops, s,
+ "translation", ITS_TRANS_SIZE);
+
+ /* Our two regions are always adjacent, therefore we now combine them
+     * into a single one in order to make our users' lives easier.
+ */
+ memory_region_init(&s->iomem_main, OBJECT(s), "gicv3_its", ITS_SIZE);
+ memory_region_add_subregion(&s->iomem_main, 0, &s->iomem_its_cntrl);
+ memory_region_add_subregion(&s->iomem_main, ITS_CONTROL_SIZE,
+ &s->iomem_its_translation);
+ sysbus_init_mmio(sbd, &s->iomem_main);
+
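+    /* Advertise that MSI delivery works on this platform */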
+ msi_nonbroken = true;
+}
+
+static void gicv3_its_common_reset(DeviceState *dev)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+
+ s->ctlr = 0;
+ s->cbaser = 0;
+ s->cwriter = 0;
+ s->creadr = 0;
+ s->iidr = 0;
+ memset(&s->baser, 0, sizeof(s->baser));
+}
+
+static void gicv3_its_common_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = gicv3_its_common_reset;
+ dc->vmsd = &vmstate_its;
+}
+
+static const TypeInfo gicv3_its_common_info = {
+ .name = TYPE_ARM_GICV3_ITS_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(GICv3ITSState),
+ .class_size = sizeof(GICv3ITSCommonClass),
+ .class_init = gicv3_its_common_class_init,
+ .abstract = true,
+};
+
+static void gicv3_its_common_register_types(void)
+{
+ type_register_static(&gicv3_its_common_info);
+}
+
+type_init(gicv3_its_common_register_types)
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
new file mode 100644
index 000000000..0b4cbed28
--- /dev/null
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -0,0 +1,266 @@
+/*
+ * KVM-based ITS implementation for a GICv3-based system
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Written by Pavel Fedin <p.fedin@samsung.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/intc/arm_gicv3_its_common.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/runstate.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "migration/blocker.h"
+#include "qom/object.h"
+
+#define TYPE_KVM_ARM_ITS "arm-its-kvm"
+typedef struct KVMARMITSClass KVMARMITSClass;
+/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
+DECLARE_OBJ_CHECKERS(GICv3ITSState, KVMARMITSClass,
+ KVM_ARM_ITS, TYPE_KVM_ARM_ITS)
+
+struct KVMARMITSClass {
+ GICv3ITSCommonClass parent_class;
+ void (*parent_reset)(DeviceState *dev);
+};
+
+
+static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid)
+{
+ struct kvm_msi msi;
+
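+    /*
+     * Lazily resolve the guest physical address of GITS_TRANSLATER
+     * (translation region base + 0x40) on first use; it forms the
+     * doorbell address of the kvm_msi injected below.
+     */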
+ if (unlikely(!s->translater_gpa_known)) {
+ MemoryRegion *mr = &s->iomem_its_translation;
+ MemoryRegionSection mrs;
+
+ mrs = memory_region_find(mr, 0, 1);
+ memory_region_unref(mrs.mr);
+ s->gits_translater_gpa = mrs.offset_within_address_space + 0x40;
+ s->translater_gpa_known = true;
+ }
+
+ msi.address_lo = extract64(s->gits_translater_gpa, 0, 32);
+ msi.address_hi = extract64(s->gits_translater_gpa, 32, 32);
+ msi.data = le32_to_cpu(value);
+ msi.flags = KVM_MSI_VALID_DEVID;
+ msi.devid = devid;
+ memset(msi.pad, 0, sizeof(msi.pad));
+
+ return kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
+}
+
+/**
+ * vm_change_state_handler - VM change state callback aiming at flushing
+ * ITS tables into guest RAM
+ *
+ * The tables get flushed to guest RAM whenever the VM gets stopped.
+ */
+static void vm_change_state_handler(void *opaque, bool running,
+ RunState state)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ Error *err = NULL;
+
+ if (running) {
+ return;
+ }
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_ITS_SAVE_TABLES, NULL, true, &err);
+ if (err) {
+ error_report_err(err);
+ }
+}
+
+static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+
+ s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_ITS, false);
+ if (s->dev_fd < 0) {
+ error_setg_errno(errp, -s->dev_fd, "error creating in-kernel ITS");
+ return;
+ }
+
+ /* explicit init of the ITS */
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort);
+
+ /* register the base address */
+ kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0);
+
+ gicv3_its_init_mmio(s, NULL, NULL);
+
+ if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CTLR)) {
+ error_setg(&s->migration_blocker, "This operating system kernel "
+ "does not support vITS migration");
+ if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
+ error_free(s->migration_blocker);
+ return;
+ }
+ } else {
+ qemu_add_vm_change_state_handler(vm_change_state_handler, s);
+ }
+
+ kvm_msi_use_devid = true;
+ kvm_gsi_direct_mapping = false;
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+}
+
+/**
+ * kvm_arm_its_pre_save - handles the saving of ITS registers.
+ * ITS tables are flushed into guest RAM separately and earlier,
+ * through the VM change state handler, since at the moment pre_save()
+ * is called, the guest RAM has already been saved.
+ */
+static void kvm_arm_its_pre_save(GICv3ITSState *s)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_BASER + i * 8, &s->baser[i], false,
+ &error_abort);
+ }
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CTLR, &s->ctlr, false, &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CBASER, &s->cbaser, false, &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CREADR, &s->creadr, false, &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CWRITER, &s->cwriter, false, &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_IIDR, &s->iidr, false, &error_abort);
+}
+
+/**
+ * kvm_arm_its_post_load - Restore both the ITS registers and tables
+ */
+static void kvm_arm_its_post_load(GICv3ITSState *s)
+{
+ int i;
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_IIDR, &s->iidr, true, &error_abort);
+
+ /*
+     * GITS_CBASER must be written before GITS_CREADR since a write to
+     * GITS_CBASER resets GITS_CREADR.
+ */
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CBASER, &s->cbaser, true, &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CREADR, &s->creadr, true, &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CWRITER, &s->cwriter, true, &error_abort);
+
+
+ for (i = 0; i < 8; i++) {
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_BASER + i * 8, &s->baser[i], true,
+ &error_abort);
+ }
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_ITS_RESTORE_TABLES, NULL, true,
+ &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CTLR, &s->ctlr, true, &error_abort);
+}
+
+static void kvm_arm_its_reset(DeviceState *dev)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+ KVMARMITSClass *c = KVM_ARM_ITS_GET_CLASS(s);
+ int i;
+
+ c->parent_reset(dev);
+
+ if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_ITS_CTRL_RESET)) {
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_ITS_CTRL_RESET, NULL, true, &error_abort);
+ return;
+ }
+
+ warn_report("ITS KVM: full reset is not supported by the host kernel");
+
+ if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CTLR)) {
+ return;
+ }
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CTLR, &s->ctlr, true, &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CBASER, &s->cbaser, true, &error_abort);
+
+ for (i = 0; i < 8; i++) {
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_BASER + i * 8, &s->baser[i], true,
+ &error_abort);
+ }
+}
+
+static Property kvm_arm_its_props[] = {
+ DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "kvm-arm-gicv3",
+ GICv3State *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void kvm_arm_its_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
+ KVMARMITSClass *ic = KVM_ARM_ITS_CLASS(klass);
+
+ dc->realize = kvm_arm_its_realize;
+ device_class_set_props(dc, kvm_arm_its_props);
+ device_class_set_parent_reset(dc, kvm_arm_its_reset, &ic->parent_reset);
+ icc->send_msi = kvm_its_send_msi;
+ icc->pre_save = kvm_arm_its_pre_save;
+ icc->post_load = kvm_arm_its_post_load;
+}
+
+static const TypeInfo kvm_arm_its_info = {
+ .name = TYPE_KVM_ARM_ITS,
+ .parent = TYPE_ARM_GICV3_ITS_COMMON,
+ .instance_size = sizeof(GICv3ITSState),
+ .class_init = kvm_arm_its_class_init,
+ .class_size = sizeof(KVMARMITSClass),
+};
+
+static void kvm_arm_its_register_types(void)
+{
+ type_register_static(&kvm_arm_its_info);
+}
+
+type_init(kvm_arm_its_register_types)
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
new file mode 100644
index 000000000..5ec5ff9ef
--- /dev/null
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -0,0 +1,900 @@
+/*
+ * ARM Generic Interrupt Controller using KVM in-kernel support
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Written by Pavel Fedin
+ * Based on vGICv2 code by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/intc/arm_gicv3_common.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "kvm_arm.h"
+#include "gicv3_internal.h"
+#include "vgic_common.h"
+#include "migration/blocker.h"
+#include "qom/object.h"
+
+#ifdef DEBUG_GICV3_KVM
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, "kvm_gicv3: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define TYPE_KVM_ARM_GICV3 "kvm-arm-gicv3"
+typedef struct KVMARMGICv3Class KVMARMGICv3Class;
+/* This is reusing the GICv3State typedef from ARM_GICV3_COMMON */
+DECLARE_OBJ_CHECKERS(GICv3State, KVMARMGICv3Class,
+ KVM_ARM_GICV3, TYPE_KVM_ARM_GICV3)
+
+#define KVM_DEV_ARM_VGIC_SYSREG(op0, op1, crn, crm, op2) \
+ (ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
+ ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
+ ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
+ ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
+ ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
+
+#define ICC_PMR_EL1 \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 4, 6, 0)
+#define ICC_BPR0_EL1 \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 3)
+#define ICC_AP0R_EL1(n) \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 4 | n)
+#define ICC_AP1R_EL1(n) \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 9, n)
+#define ICC_BPR1_EL1 \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 3)
+#define ICC_CTLR_EL1 \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1 \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 5)
+#define ICC_IGRPEN0_EL1 \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 6)
+#define ICC_IGRPEN1_EL1 \
+ KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 7)
+
+struct KVMARMGICv3Class {
+ ARMGICv3CommonClass parent_class;
+ DeviceRealize parent_realize;
+ void (*parent_reset)(DeviceState *dev);
+};
+
+static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level)
+{
+ GICv3State *s = (GICv3State *)opaque;
+
+ kvm_arm_gic_set_irq(s->num_irq, irq, level);
+}
+
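+/*
+ * KVM device attribute encoding: the register offset is combined with the
+ * target CPU's MPIDR affinity value (taken from GICR_TYPER), so the kernel
+ * knows which redistributor or CPU interface the access refers to.
+ */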
+#define KVM_VGIC_ATTR(reg, typer) \
+ ((typer & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) | (reg))
+
+static inline void kvm_gicd_access(GICv3State *s, int offset,
+ uint32_t *val, bool write)
+{
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+ KVM_VGIC_ATTR(offset, 0),
+ val, write, &error_abort);
+}
+
+static inline void kvm_gicr_access(GICv3State *s, int offset, int cpu,
+ uint32_t *val, bool write)
+{
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
+ KVM_VGIC_ATTR(offset, s->cpu[cpu].gicr_typer),
+ val, write, &error_abort);
+}
+
+static inline void kvm_gicc_access(GICv3State *s, uint64_t reg, int cpu,
+ uint64_t *val, bool write)
+{
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+ KVM_VGIC_ATTR(reg, s->cpu[cpu].gicr_typer),
+ val, write, &error_abort);
+}
+
+static inline void kvm_gic_line_level_access(GICv3State *s, int irq, int cpu,
+ uint32_t *val, bool write)
+{
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
+ KVM_VGIC_ATTR(irq, s->cpu[cpu].gicr_typer) |
+ (VGIC_LEVEL_INFO_LINE_LEVEL <<
+ KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT),
+ val, write, &error_abort);
+}
+
+/* Loop through each distributor IRQ related register; since bits
+ * corresponding to SGIs and PPIs are RAZ/WI when affinity routing
+ * is enabled, we skip those.
+ */
+#define for_each_dist_irq_reg(_irq, _max, _field_width) \
+ for (_irq = GIC_INTERNAL; _irq < _max; _irq += (32 / _field_width))
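+/*
+ * For example, with a field width of 8 bits (the priority registers) each
+ * 32-bit register covers 4 interrupts, so the loop advances by 4 interrupt
+ * IDs per register; with a field width of 1 bit it advances by 32.
+ */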
+
+static void kvm_dist_get_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
+{
+ uint32_t reg, *field;
+ int irq;
+
+ /* For the KVM GICv3, affinity routing is always enabled, and the first 8
+ * GICD_IPRIORITYR<n> registers are always RAZ/WI. The corresponding
+     * functionality is replaced by GICR_IPRIORITYR<n>, so there is no need
+     * to sync them. We therefore skip the fields for the GIC_INTERNAL irqs
+     * in both bmp and offset.
+ */
+ field = (uint32_t *)(bmp + GIC_INTERNAL);
+ offset += (GIC_INTERNAL * 8) / 8;
+ for_each_dist_irq_reg(irq, s->num_irq, 8) {
+ kvm_gicd_access(s, offset, &reg, false);
+ *field = reg;
+ offset += 4;
+ field++;
+ }
+}
+
+static void kvm_dist_put_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
+{
+ uint32_t reg, *field;
+ int irq;
+
+ /* For the KVM GICv3, affinity routing is always enabled, and the first 8
+ * GICD_IPRIORITYR<n> registers are always RAZ/WI. The corresponding
+     * functionality is replaced by GICR_IPRIORITYR<n>, so there is no need
+     * to sync them. We therefore skip the fields for the GIC_INTERNAL irqs
+     * in both bmp and offset.
+ */
+ field = (uint32_t *)(bmp + GIC_INTERNAL);
+ offset += (GIC_INTERNAL * 8) / 8;
+ for_each_dist_irq_reg(irq, s->num_irq, 8) {
+ reg = *field;
+ kvm_gicd_access(s, offset, &reg, true);
+ offset += 4;
+ field++;
+ }
+}
+
+static void kvm_dist_get_edge_trigger(GICv3State *s, uint32_t offset,
+ uint32_t *bmp)
+{
+ uint32_t reg;
+ int irq;
+
+ /* For the KVM GICv3, affinity routing is always enabled, and the first 2
+ * GICD_ICFGR<n> registers are always RAZ/WI. The corresponding
+     * functionality is replaced by GICR_ICFGR<n>, so there is no need to
+     * sync them; we just increase the offset to skip the GIC_INTERNAL irqs.
+ * This matches the for_each_dist_irq_reg() macro which also skips the
+ * first GIC_INTERNAL irqs.
+ */
+ offset += (GIC_INTERNAL * 2) / 8;
+ for_each_dist_irq_reg(irq, s->num_irq, 2) {
+ kvm_gicd_access(s, offset, &reg, false);
+ reg = half_unshuffle32(reg >> 1);
+ if (irq % 32 != 0) {
+ reg = (reg << 16);
+ }
+ *gic_bmp_ptr32(bmp, irq) |= reg;
+ offset += 4;
+ }
+}
+
+static void kvm_dist_put_edge_trigger(GICv3State *s, uint32_t offset,
+ uint32_t *bmp)
+{
+ uint32_t reg;
+ int irq;
+
+ /* For the KVM GICv3, affinity routing is always enabled, and the first 2
+ * GICD_ICFGR<n> registers are always RAZ/WI. The corresponding
+     * functionality is replaced by GICR_ICFGR<n>, so there is no need to
+     * sync them; we just increase the offset to skip the GIC_INTERNAL irqs.
+ * This matches the for_each_dist_irq_reg() macro which also skips the
+ * first GIC_INTERNAL irqs.
+ */
+ offset += (GIC_INTERNAL * 2) / 8;
+ for_each_dist_irq_reg(irq, s->num_irq, 2) {
+ reg = *gic_bmp_ptr32(bmp, irq);
+ if (irq % 32 != 0) {
+ reg = (reg & 0xffff0000) >> 16;
+ } else {
+ reg = reg & 0xffff;
+ }
+ reg = half_shuffle32(reg) << 1;
+ kvm_gicd_access(s, offset, &reg, true);
+ offset += 4;
+ }
+}
+
+static void kvm_gic_get_line_level_bmp(GICv3State *s, uint32_t *bmp)
+{
+ uint32_t reg;
+ int irq;
+
+ for_each_dist_irq_reg(irq, s->num_irq, 1) {
+ kvm_gic_line_level_access(s, irq, 0, &reg, false);
+ *gic_bmp_ptr32(bmp, irq) = reg;
+ }
+}
+
+static void kvm_gic_put_line_level_bmp(GICv3State *s, uint32_t *bmp)
+{
+ uint32_t reg;
+ int irq;
+
+ for_each_dist_irq_reg(irq, s->num_irq, 1) {
+ reg = *gic_bmp_ptr32(bmp, irq);
+ kvm_gic_line_level_access(s, irq, 0, &reg, true);
+ }
+}
+
+/* Read a bitmap register group from the kernel VGIC. */
+static void kvm_dist_getbmp(GICv3State *s, uint32_t offset, uint32_t *bmp)
+{
+ uint32_t reg;
+ int irq;
+
+ /* For the KVM GICv3, affinity routing is always enabled, and the
+ * GICD_IGROUPR0/GICD_IGRPMODR0/GICD_ISENABLER0/GICD_ISPENDR0/
+ * GICD_ISACTIVER0 registers are always RAZ/WI. The corresponding
+     * functionality is replaced by the GICR registers, so there is no need to
+     * sync them; we just increase the offset to skip the GIC_INTERNAL irqs.
+ * This matches the for_each_dist_irq_reg() macro which also skips the
+ * first GIC_INTERNAL irqs.
+ */
+ offset += (GIC_INTERNAL * 1) / 8;
+ for_each_dist_irq_reg(irq, s->num_irq, 1) {
+ kvm_gicd_access(s, offset, &reg, false);
+ *gic_bmp_ptr32(bmp, irq) = reg;
+ offset += 4;
+ }
+}
+
+static void kvm_dist_putbmp(GICv3State *s, uint32_t offset,
+ uint32_t clroffset, uint32_t *bmp)
+{
+ uint32_t reg;
+ int irq;
+
+ /* For the KVM GICv3, affinity routing is always enabled, and the
+ * GICD_IGROUPR0/GICD_IGRPMODR0/GICD_ISENABLER0/GICD_ISPENDR0/
+ * GICD_ISACTIVER0 registers are always RAZ/WI. The corresponding
+     * functionality is replaced by the GICR registers, so there is no need to
+     * sync them; we just increase the offset and clroffset to skip the
+     * GIC_INTERNAL irqs. This matches the for_each_dist_irq_reg() macro which
+     * also skips the first GIC_INTERNAL irqs.
+ */
+ offset += (GIC_INTERNAL * 1) / 8;
+ if (clroffset != 0) {
+ clroffset += (GIC_INTERNAL * 1) / 8;
+ }
+
+ for_each_dist_irq_reg(irq, s->num_irq, 1) {
+ /* If this bitmap is a set/clear register pair, first write to the
+ * clear-reg to clear all bits before using the set-reg to write
+ * the 1 bits.
+ */
+ if (clroffset != 0) {
+ reg = 0;
+ kvm_gicd_access(s, clroffset, &reg, true);
+ clroffset += 4;
+ }
+ reg = *gic_bmp_ptr32(bmp, irq);
+ kvm_gicd_access(s, offset, &reg, true);
+ offset += 4;
+ }
+}
+
+static void kvm_arm_gicv3_check(GICv3State *s)
+{
+ uint32_t reg;
+ uint32_t num_irq;
+
+ /* Sanity checking s->num_irq */
+ kvm_gicd_access(s, GICD_TYPER, &reg, false);
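+    /* GICD_TYPER.ITLinesNumber: the GIC supports up to 32 * (N + 1) IRQ IDs */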
+ num_irq = ((reg & 0x1f) + 1) * 32;
+
+ if (num_irq < s->num_irq) {
+ error_report("Model requests %u IRQs, but kernel supports max %u",
+ s->num_irq, num_irq);
+ abort();
+ }
+}
+
+static void kvm_arm_gicv3_put(GICv3State *s)
+{
+ uint32_t regl, regh, reg;
+ uint64_t reg64, redist_typer;
+ int ncpu, i;
+
+ kvm_arm_gicv3_check(s);
+
+ kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
+ kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
+ redist_typer = ((uint64_t)regh << 32) | regl;
+
+ reg = s->gicd_ctlr;
+ kvm_gicd_access(s, GICD_CTLR, &reg, true);
+
+ if (redist_typer & GICR_TYPER_PLPIS) {
+ /*
+ * Restore base addresses before LPIs are potentially enabled by
+ * GICR_CTLR write
+ */
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+
+ reg64 = c->gicr_propbaser;
+ regl = (uint32_t)reg64;
+ kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, true);
+ regh = (uint32_t)(reg64 >> 32);
+ kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, true);
+
+ reg64 = c->gicr_pendbaser;
+ regl = (uint32_t)reg64;
+ kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, true);
+ regh = (uint32_t)(reg64 >> 32);
+ kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, true);
+ }
+ }
+
+ /* Redistributor state (one per CPU) */
+
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+
+ reg = c->gicr_ctlr;
+ kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true);
+
+ reg = c->gicr_statusr[GICV3_NS];
+ kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, true);
+
+ reg = c->gicr_waker;
+ kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);
+
+ reg = c->gicr_igroupr0;
+ kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, true);
+
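+        /*
+         * For the set/clear register pairs below, clear everything via
+         * the ICENABLER0/ICPENDR0/ICACTIVER0 writes first, then set the
+         * saved state via the corresponding ISENABLER0/ISPENDR0/
+         * ISACTIVER0 writes.
+         */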
+ reg = ~0;
+ kvm_gicr_access(s, GICR_ICENABLER0, ncpu, &reg, true);
+ reg = c->gicr_ienabler0;
+ kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, true);
+
+ /* Restore config before pending so we treat level/edge correctly */
+ reg = half_shuffle32(c->edge_trigger >> 16) << 1;
+ kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, true);
+
+ reg = c->level;
+ kvm_gic_line_level_access(s, 0, ncpu, &reg, true);
+
+ reg = ~0;
+ kvm_gicr_access(s, GICR_ICPENDR0, ncpu, &reg, true);
+ reg = c->gicr_ipendr0;
+ kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, true);
+
+ reg = ~0;
+ kvm_gicr_access(s, GICR_ICACTIVER0, ncpu, &reg, true);
+ reg = c->gicr_iactiver0;
+ kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, true);
+
+ for (i = 0; i < GIC_INTERNAL; i += 4) {
+ reg = c->gicr_ipriorityr[i] |
+ (c->gicr_ipriorityr[i + 1] << 8) |
+ (c->gicr_ipriorityr[i + 2] << 16) |
+ (c->gicr_ipriorityr[i + 3] << 24);
+ kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, true);
+ }
+ }
+
+    /* Distributor state (shared between all CPUs) */
+ reg = s->gicd_statusr[GICV3_NS];
+ kvm_gicd_access(s, GICD_STATUSR, &reg, true);
+
+ /* s->enable bitmap -> GICD_ISENABLERn */
+ kvm_dist_putbmp(s, GICD_ISENABLER, GICD_ICENABLER, s->enabled);
+
+ /* s->group bitmap -> GICD_IGROUPRn */
+ kvm_dist_putbmp(s, GICD_IGROUPR, 0, s->group);
+
+ /* Restore targets before pending to ensure the pending state is set on
+ * the appropriate CPU interfaces in the kernel
+ */
+
+ /* s->gicd_irouter[irq] -> GICD_IROUTERn
+     * We can't use kvm_dist_putbmp() here because the registers are 64-bit
+ */
+ for (i = GIC_INTERNAL; i < s->num_irq; i++) {
+ uint32_t offset;
+
+ offset = GICD_IROUTER + (sizeof(uint32_t) * i);
+ reg = (uint32_t)s->gicd_irouter[i];
+ kvm_gicd_access(s, offset, &reg, true);
+
+ offset = GICD_IROUTER + (sizeof(uint32_t) * i) + 4;
+ reg = (uint32_t)(s->gicd_irouter[i] >> 32);
+ kvm_gicd_access(s, offset, &reg, true);
+ }
+
+ /* s->trigger bitmap -> GICD_ICFGRn
+ * (restore configuration registers before pending IRQs so we treat
+ * level/edge correctly)
+ */
+ kvm_dist_put_edge_trigger(s, GICD_ICFGR, s->edge_trigger);
+
+ /* s->level bitmap -> line_level */
+ kvm_gic_put_line_level_bmp(s, s->level);
+
+ /* s->pending bitmap -> GICD_ISPENDRn */
+ kvm_dist_putbmp(s, GICD_ISPENDR, GICD_ICPENDR, s->pending);
+
+ /* s->active bitmap -> GICD_ISACTIVERn */
+ kvm_dist_putbmp(s, GICD_ISACTIVER, GICD_ICACTIVER, s->active);
+
+ /* s->gicd_ipriority[] -> GICD_IPRIORITYRn */
+ kvm_dist_put_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);
+
+ /* CPU Interface state (one per CPU) */
+
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+ int num_pri_bits;
+
+ kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, true);
+ kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
+ &c->icc_ctlr_el1[GICV3_NS], true);
+ kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
+ &c->icc_igrpen[GICV3_G0], true);
+ kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
+ &c->icc_igrpen[GICV3_G1NS], true);
+ kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, true);
+ kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], true);
+ kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], true);
+
+ num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
+ ICC_CTLR_EL1_PRIBITS_MASK) >>
+ ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;
+
+ switch (num_pri_bits) {
+ case 7:
+ reg64 = c->icc_apr[GICV3_G0][3];
+ kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, true);
+ reg64 = c->icc_apr[GICV3_G0][2];
+ kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, true);
+ /* fall through */
+ case 6:
+ reg64 = c->icc_apr[GICV3_G0][1];
+ kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, true);
+ /* fall through */
+ default:
+ reg64 = c->icc_apr[GICV3_G0][0];
+ kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, true);
+ }
+
+ switch (num_pri_bits) {
+ case 7:
+ reg64 = c->icc_apr[GICV3_G1NS][3];
+ kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, true);
+ reg64 = c->icc_apr[GICV3_G1NS][2];
+ kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, true);
+ /* fall through */
+ case 6:
+ reg64 = c->icc_apr[GICV3_G1NS][1];
+ kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, true);
+ /* fall through */
+ default:
+ reg64 = c->icc_apr[GICV3_G1NS][0];
+ kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, true);
+ }
+ }
+}
+
+static void kvm_arm_gicv3_get(GICv3State *s)
+{
+ uint32_t regl, regh, reg;
+ uint64_t reg64, redist_typer;
+ int ncpu, i;
+
+ kvm_arm_gicv3_check(s);
+
+ kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
+ kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
+ redist_typer = ((uint64_t)regh << 32) | regl;
+
+ kvm_gicd_access(s, GICD_CTLR, &reg, false);
+ s->gicd_ctlr = reg;
+
+ /* Redistributor state (one per CPU) */
+
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+
+ kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, false);
+ c->gicr_ctlr = reg;
+
+ kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, false);
+ c->gicr_statusr[GICV3_NS] = reg;
+
+ kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false);
+ c->gicr_waker = reg;
+
+ kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, false);
+ c->gicr_igroupr0 = reg;
+ kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, false);
+ c->gicr_ienabler0 = reg;
+ kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, false);
+ c->edge_trigger = half_unshuffle32(reg >> 1) << 16;
+ kvm_gic_line_level_access(s, 0, ncpu, &reg, false);
+ c->level = reg;
+ kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, false);
+ c->gicr_ipendr0 = reg;
+ kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, false);
+ c->gicr_iactiver0 = reg;
+
+ for (i = 0; i < GIC_INTERNAL; i += 4) {
+ kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, false);
+ c->gicr_ipriorityr[i] = extract32(reg, 0, 8);
+ c->gicr_ipriorityr[i + 1] = extract32(reg, 8, 8);
+ c->gicr_ipriorityr[i + 2] = extract32(reg, 16, 8);
+ c->gicr_ipriorityr[i + 3] = extract32(reg, 24, 8);
+ }
+ }
+
+ if (redist_typer & GICR_TYPER_PLPIS) {
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+
+ kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, false);
+ kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, false);
+ c->gicr_propbaser = ((uint64_t)regh << 32) | regl;
+
+ kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, false);
+ kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, false);
+ c->gicr_pendbaser = ((uint64_t)regh << 32) | regl;
+ }
+ }
+
+    /* Distributor state (shared between all CPUs) */
+
+ kvm_gicd_access(s, GICD_STATUSR, &reg, false);
+ s->gicd_statusr[GICV3_NS] = reg;
+
+ /* GICD_IGROUPRn -> s->group bitmap */
+ kvm_dist_getbmp(s, GICD_IGROUPR, s->group);
+
+ /* GICD_ISENABLERn -> s->enabled bitmap */
+ kvm_dist_getbmp(s, GICD_ISENABLER, s->enabled);
+
+ /* Line level of irq */
+ kvm_gic_get_line_level_bmp(s, s->level);
+ /* GICD_ISPENDRn -> s->pending bitmap */
+ kvm_dist_getbmp(s, GICD_ISPENDR, s->pending);
+
+ /* GICD_ISACTIVERn -> s->active bitmap */
+ kvm_dist_getbmp(s, GICD_ISACTIVER, s->active);
+
+ /* GICD_ICFGRn -> s->trigger bitmap */
+ kvm_dist_get_edge_trigger(s, GICD_ICFGR, s->edge_trigger);
+
+ /* GICD_IPRIORITYRn -> s->gicd_ipriority[] */
+ kvm_dist_get_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);
+
+ /* GICD_IROUTERn -> s->gicd_irouter[irq] */
+ for (i = GIC_INTERNAL; i < s->num_irq; i++) {
+ uint32_t offset;
+
+ offset = GICD_IROUTER + (sizeof(uint32_t) * i);
+ kvm_gicd_access(s, offset, &regl, false);
+ offset = GICD_IROUTER + (sizeof(uint32_t) * i) + 4;
+ kvm_gicd_access(s, offset, &regh, false);
+ s->gicd_irouter[i] = ((uint64_t)regh << 32) | regl;
+ }
+
+ /*****************************************************************
+ * CPU Interface(s) State
+ */
+
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+ int num_pri_bits;
+
+ kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, false);
+ kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
+ &c->icc_ctlr_el1[GICV3_NS], false);
+ kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
+ &c->icc_igrpen[GICV3_G0], false);
+ kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
+ &c->icc_igrpen[GICV3_G1NS], false);
+ kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, false);
+ kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], false);
+ kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], false);
+ num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
+ ICC_CTLR_EL1_PRIBITS_MASK) >>
+ ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;
+
+ switch (num_pri_bits) {
+ case 7:
+ kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, false);
+ c->icc_apr[GICV3_G0][3] = reg64;
+ kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, false);
+ c->icc_apr[GICV3_G0][2] = reg64;
+ /* fall through */
+ case 6:
+ kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, false);
+ c->icc_apr[GICV3_G0][1] = reg64;
+ /* fall through */
+ default:
+ kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, false);
+ c->icc_apr[GICV3_G0][0] = reg64;
+ }
+
+ switch (num_pri_bits) {
+ case 7:
+ kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, false);
+ c->icc_apr[GICV3_G1NS][3] = reg64;
+ kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, false);
+ c->icc_apr[GICV3_G1NS][2] = reg64;
+ /* fall through */
+ case 6:
+ kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, false);
+ c->icc_apr[GICV3_G1NS][1] = reg64;
+ /* fall through */
+ default:
+ kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, false);
+ c->icc_apr[GICV3_G1NS][0] = reg64;
+ }
+ }
+}
+
+static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3State *s;
+ GICv3CPUState *c;
+
+ c = (GICv3CPUState *)env->gicv3state;
+ s = c->gic;
+
+ c->icc_pmr_el1 = 0;
+ c->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
+ c->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
+ c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;
+
+ c->icc_sre_el1 = 0x7;
+ memset(c->icc_apr, 0, sizeof(c->icc_apr));
+ memset(c->icc_igrpen, 0, sizeof(c->icc_igrpen));
+
+ if (s->migration_blocker) {
+ return;
+ }
+
+ /* Initialize to actual HW supported configuration */
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+ KVM_VGIC_ATTR(ICC_CTLR_EL1, c->gicr_typer),
+ &c->icc_ctlr_el1[GICV3_NS], false, &error_abort);
+
+ c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS];
+}
+
+static void kvm_arm_gicv3_reset(DeviceState *dev)
+{
+ GICv3State *s = ARM_GICV3_COMMON(dev);
+ KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
+
+ DPRINTF("Reset\n");
+
+ kgc->parent_reset(dev);
+
+ if (s->migration_blocker) {
+ DPRINTF("Cannot put kernel gic state, no kernel interface\n");
+ return;
+ }
+
+ kvm_arm_gicv3_put(s);
+}
+
+/*
+ * The GIC CPU interface registers need to be reset on CPU reset. To get
+ * arm_gicv3_icc_reset() called on CPU reset, we register the ARMCPRegInfo
+ * below. Since we reset the whole CPU interface from this single register's
+ * reset function, we define only one CPU interface register instead of
+ * defining all of them.
+ */
+static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
+ { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
+ /*
+     * If ARM_CP_NOP were used, resetfn would not be called,
+     * so ARM_CP_NO_RAW is the appropriate type.
+ */
+ .type = ARM_CP_NO_RAW,
+ .access = PL1_RW,
+ .readfn = arm_cp_read_zero,
+ .writefn = arm_cp_write_ignore,
+ /*
+ * We hang the whole cpu interface reset routine off here
+ * rather than parcelling it out into one little function
+ * per register
+ */
+ .resetfn = arm_gicv3_icc_reset,
+ },
+ REGINFO_SENTINEL
+};
+
+/**
+ * vm_change_state_handler - VM change state callback aiming at flushing
+ * RDIST pending tables into guest RAM
+ *
+ * The tables get flushed to guest RAM whenever the VM gets stopped.
+ */
+static void vm_change_state_handler(void *opaque, bool running,
+ RunState state)
+{
+ GICv3State *s = (GICv3State *)opaque;
+ Error *err = NULL;
+ int ret;
+
+ if (running) {
+ return;
+ }
+
+ ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES,
+ NULL, true, &err);
+ if (err) {
+ error_report_err(err);
+ }
+ if (ret < 0 && ret != -EFAULT) {
+ abort();
+ }
+}
+
+
+static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
+{
+ GICv3State *s = KVM_ARM_GICV3(dev);
+ KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
+ bool multiple_redist_region_allowed;
+ Error *local_err = NULL;
+ int i;
+
+ DPRINTF("kvm_arm_gicv3_realize\n");
+
+ kgc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (s->security_extn) {
+ error_setg(errp, "the in-kernel VGICv3 does not implement the "
+ "security extensions");
+ return;
+ }
+
+ gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL);
+
+ for (i = 0; i < s->num_cpu; i++) {
+ ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
+
+ define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
+ }
+
+ /* Try to create the device via the device control API */
+ s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ if (s->dev_fd < 0) {
+ error_setg_errno(errp, -s->dev_fd, "error creating in-kernel VGIC");
+ return;
+ }
+
+ multiple_redist_region_allowed =
+ kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION);
+
+ if (!multiple_redist_region_allowed && s->nb_redist_regions > 1) {
+ error_setg(errp, "Multiple VGICv3 redistributor regions are not "
+ "supported by this host kernel");
+ error_append_hint(errp, "A maximum of %d VCPUs can be used",
+ s->redist_region_count[0]);
+ return;
+ }
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
+ 0, &s->num_irq, true, &error_abort);
+
+ /* Tell the kernel to complete VGIC initialization now */
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort);
+
+ kvm_arm_register_device(&s->iomem_dist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd, 0);
+
+ if (!multiple_redist_region_allowed) {
+ kvm_arm_register_device(&s->redist_regions[0].iomem, -1,
+ KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd, 0);
+ } else {
+        /* We register regions in reverse order because "devices" are inserted at
+ * the head of a QSLIST and the list is then popped from the head
+ * onwards by kvm_arm_machine_init_done()
+ */
+ for (i = s->nb_redist_regions - 1; i >= 0; i--) {
+ /* Address mask made of the rdist region index and count */
+ uint64_t addr_ormask =
+ i | ((uint64_t)s->redist_region_count[i] << 52);
+
+ kvm_arm_register_device(&s->redist_regions[i].iomem, -1,
+ KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION,
+ s->dev_fd, addr_ormask);
+ }
+ }
+
+ if (kvm_has_gsi_routing()) {
+ /* set up irq routing */
+ for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
+ kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+ }
+
+ kvm_gsi_routing_allowed = true;
+
+ kvm_irqchip_commit_routes(kvm_state);
+ }
+
+ if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+ GICD_CTLR)) {
+ error_setg(&s->migration_blocker, "This operating system kernel does "
+ "not support vGICv3 migration");
+ if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
+ error_free(s->migration_blocker);
+ return;
+ }
+ }
+ if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES)) {
+ qemu_add_vm_change_state_handler(vm_change_state_handler, s);
+ }
+}
+
+static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
+ KVMARMGICv3Class *kgc = KVM_ARM_GICV3_CLASS(klass);
+
+ agcc->pre_save = kvm_arm_gicv3_get;
+ agcc->post_load = kvm_arm_gicv3_put;
+ device_class_set_parent_realize(dc, kvm_arm_gicv3_realize,
+ &kgc->parent_realize);
+ device_class_set_parent_reset(dc, kvm_arm_gicv3_reset, &kgc->parent_reset);
+}
+
+static const TypeInfo kvm_arm_gicv3_info = {
+ .name = TYPE_KVM_ARM_GICV3,
+ .parent = TYPE_ARM_GICV3_COMMON,
+ .instance_size = sizeof(GICv3State),
+ .class_init = kvm_arm_gicv3_class_init,
+ .class_size = sizeof(KVMARMGICv3Class),
+};
+
+static void kvm_arm_gicv3_register_types(void)
+{
+ type_register_static(&kvm_arm_gicv3_info);
+}
+
+type_init(kvm_arm_gicv3_register_types)
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
new file mode 100644
index 000000000..c8ff3eca0
--- /dev/null
+++ b/hw/intc/arm_gicv3_redist.c
@@ -0,0 +1,738 @@
+/*
+ * ARM GICv3 emulation: Redistributor
+ *
+ * Copyright (c) 2015 Huawei.
+ * Copyright (c) 2016 Linaro Limited.
+ * Written by Shlomo Pongratz, Peter Maydell
+ *
+ * This code is licensed under the GPL, version 2 or (at your option)
+ * any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "gicv3_internal.h"
+
+static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
+{
+ /* Return a 32-bit mask which should be applied for this set of 32
+ * interrupts; each bit is 1 if access is permitted by the
+ * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
+ * not affect config register accesses, unlike GICD_NSACR.)
+ */
+ if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
+ /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
+ return cs->gicr_igroupr0;
+ }
+ return 0xFFFFFFFFU;
+}
+
+static int gicr_ns_access(GICv3CPUState *cs, int irq)
+{
+ /* Return the 2 bit NSACR.NS_access field for this SGI */
+ assert(irq < 16);
+ return extract32(cs->gicr_nsacr, irq * 2, 2);
+}
+
+static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
+ uint32_t *reg, uint32_t val)
+{
+ /* Helper routine to implement writing to a "set-bitmap" register */
+ val &= mask_group(cs, attrs);
+ *reg |= val;
+ gicv3_redist_update(cs);
+}
+
+static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
+ uint32_t *reg, uint32_t val)
+{
+ /* Helper routine to implement writing to a "clear-bitmap" register */
+ val &= mask_group(cs, attrs);
+ *reg &= ~val;
+ gicv3_redist_update(cs);
+}
+
+static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
+ uint32_t reg)
+{
+ reg &= mask_group(cs, attrs);
+ return reg;
+}
+
+static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
+ int irq)
+{
+ /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
+ * honouring security state (these are RAZ/WI for Group 0 or Secure
+ * Group 1 interrupts).
+ */
+ uint32_t prio;
+
+ prio = cs->gicr_ipriorityr[irq];
+
+ if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
+ if (!(cs->gicr_igroupr0 & (1U << irq))) {
+ /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
+ return 0;
+ }
+ /* NS view of the interrupt priority */
+ prio = (prio << 1) & 0xff;
+ }
+ return prio;
+}
+
+static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
+ uint8_t value)
+{
+ /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
+ * honouring security state (these are RAZ/WI for Group 0 or Secure
+ * Group 1 interrupts).
+ */
+ if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
+ if (!(cs->gicr_igroupr0 & (1U << irq))) {
+ /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
+ return;
+ }
+ /* NS view of the interrupt priority */
+ value = 0x80 | (value >> 1);
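+ /* For example, an NS write of 0x80 is stored as 0xc0, which then
+ * reads back as 0x80 via the NS view above and as 0xc0 from Secure.
+ */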
+ }
+ cs->gicr_ipriorityr[irq] = value;
+}
+
+static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
+ *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
+ return MEMTX_OK;
+ default:
+ return MEMTX_ERROR;
+ }
+}
+
+static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
+ gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
+ gicv3_redist_update(cs);
+ return MEMTX_OK;
+ default:
+ return MEMTX_ERROR;
+ }
+}
+
+static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case GICR_CTLR:
+ *data = cs->gicr_ctlr;
+ return MEMTX_OK;
+ case GICR_IIDR:
+ *data = gicv3_iidr();
+ return MEMTX_OK;
+ case GICR_TYPER:
+ *data = extract64(cs->gicr_typer, 0, 32);
+ return MEMTX_OK;
+ case GICR_TYPER + 4:
+ *data = extract64(cs->gicr_typer, 32, 32);
+ return MEMTX_OK;
+ case GICR_STATUSR:
+ /* RAZ/WI for us (this is an optional register and our implementation
+ * does not track RO/WO/reserved violations to report them to the guest)
+ */
+ *data = 0;
+ return MEMTX_OK;
+ case GICR_WAKER:
+ *data = cs->gicr_waker;
+ return MEMTX_OK;
+ case GICR_PROPBASER:
+ *data = extract64(cs->gicr_propbaser, 0, 32);
+ return MEMTX_OK;
+ case GICR_PROPBASER + 4:
+ *data = extract64(cs->gicr_propbaser, 32, 32);
+ return MEMTX_OK;
+ case GICR_PENDBASER:
+ *data = extract64(cs->gicr_pendbaser, 0, 32);
+ return MEMTX_OK;
+ case GICR_PENDBASER + 4:
+ *data = extract64(cs->gicr_pendbaser, 32, 32);
+ return MEMTX_OK;
+ case GICR_IGROUPR0:
+ if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
+ *data = 0;
+ return MEMTX_OK;
+ }
+ *data = cs->gicr_igroupr0;
+ return MEMTX_OK;
+ case GICR_ISENABLER0:
+ case GICR_ICENABLER0:
+ *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
+ return MEMTX_OK;
+ case GICR_ISPENDR0:
+ case GICR_ICPENDR0:
+ {
+ /* The pending register reads as the logical OR of the pending
+ * latch and the input line level for level-triggered interrupts.
+ */
+ uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
+ *data = gicr_read_bitmap_reg(cs, attrs, val);
+ return MEMTX_OK;
+ }
+ case GICR_ISACTIVER0:
+ case GICR_ICACTIVER0:
+ *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
+ return MEMTX_OK;
+ case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
+ {
+ int i, irq = offset - GICR_IPRIORITYR;
+ uint32_t value = 0;
+
+ for (i = irq + 3; i >= irq; i--) {
+ value <<= 8;
+ value |= gicr_read_ipriorityr(cs, attrs, i);
+ }
+ *data = value;
+ return MEMTX_OK;
+ }
+ case GICR_ICFGR0:
+ case GICR_ICFGR1:
+ {
+ /* Our edge_trigger bitmap is one bit per irq; take the correct
+ * half of it, and spread it out into the odd bits.
+ */
+ uint32_t value;
+
+ value = cs->edge_trigger & mask_group(cs, attrs);
+ value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
+ value = half_shuffle32(value) << 1;
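+ /* For example, an edge_trigger pattern of 0xf for the first four
+ * interrupts in this half reads back as 0xaa: each 2-bit field
+ * becomes 0b10 (edge-triggered).
+ */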
+ *data = value;
+ return MEMTX_OK;
+ }
+ case GICR_IGRPMODR0:
+ if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
+ /* RAZ/WI if security disabled, or if
+ * security enabled and this is an NS access
+ */
+ *data = 0;
+ return MEMTX_OK;
+ }
+ *data = cs->gicr_igrpmodr0;
+ return MEMTX_OK;
+ case GICR_NSACR:
+ if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
+ /* RAZ/WI if security disabled, or if
+ * security enabled and this is an NS access
+ */
+ *data = 0;
+ return MEMTX_OK;
+ }
+ *data = cs->gicr_nsacr;
+ return MEMTX_OK;
+ case GICR_IDREGS ... GICR_IDREGS + 0x2f:
+ *data = gicv3_idreg(offset - GICR_IDREGS);
+ return MEMTX_OK;
+ default:
+ return MEMTX_ERROR;
+ }
+}
+
+static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case GICR_CTLR:
+ /* For our implementation, GICR_TYPER.DPGS is 0 and so all
+ * the DPG bits are RAZ/WI. We don't do anything asynchronously,
+ * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
+ * implement LPIs) so Enable_LPIs is programmable.
+ */
+ if (cs->gicr_typer & GICR_TYPER_PLPIS) {
+ if (value & GICR_CTLR_ENABLE_LPIS) {
+ cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
+ /* Check for any pending interrupts in the pending table */
+ gicv3_redist_update_lpi(cs);
+ } else {
+ cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
+ /* cs->hppi might have been an LPI; recalculate */
+ gicv3_redist_update(cs);
+ }
+ }
+ return MEMTX_OK;
+ case GICR_STATUSR:
+ /* RAZ/WI for our implementation */
+ return MEMTX_OK;
+ case GICR_WAKER:
+ /* Only the ProcessorSleep bit is writeable. When the guest sets
+ * it, it requests that we transition the channel between the
+ * redistributor and the cpu interface to quiescent, and that
+ * we set the ChildrenAsleep bit once the interface has reached the
+ * quiescent state.
+ * Setting the ProcessorSleep to 0 reverses the quiescing, and
+ * ChildrenAsleep is cleared once the transition is complete.
+ * Since our interface is not asynchronous, we complete these
+ * transitions instantaneously, so we set ChildrenAsleep to the
+ * same value as ProcessorSleep here.
+ */
+ value &= GICR_WAKER_ProcessorSleep;
+ if (value & GICR_WAKER_ProcessorSleep) {
+ value |= GICR_WAKER_ChildrenAsleep;
+ }
+ cs->gicr_waker = value;
+ return MEMTX_OK;
+ case GICR_PROPBASER:
+ cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
+ return MEMTX_OK;
+ case GICR_PROPBASER + 4:
+ cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
+ return MEMTX_OK;
+ case GICR_PENDBASER:
+ cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
+ return MEMTX_OK;
+ case GICR_PENDBASER + 4:
+ cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
+ return MEMTX_OK;
+ case GICR_IGROUPR0:
+ if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
+ return MEMTX_OK;
+ }
+ cs->gicr_igroupr0 = value;
+ gicv3_redist_update(cs);
+ return MEMTX_OK;
+ case GICR_ISENABLER0:
+ gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
+ return MEMTX_OK;
+ case GICR_ICENABLER0:
+ gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
+ return MEMTX_OK;
+ case GICR_ISPENDR0:
+ gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
+ return MEMTX_OK;
+ case GICR_ICPENDR0:
+ gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
+ return MEMTX_OK;
+ case GICR_ISACTIVER0:
+ gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
+ return MEMTX_OK;
+ case GICR_ICACTIVER0:
+ gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
+ return MEMTX_OK;
+ case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
+ {
+ int i, irq = offset - GICR_IPRIORITYR;
+
+ for (i = irq; i < irq + 4; i++, value >>= 8) {
+ gicr_write_ipriorityr(cs, attrs, i, value);
+ }
+ gicv3_redist_update(cs);
+ return MEMTX_OK;
+ }
+ case GICR_ICFGR0:
+ /* Register is all RAZ/WI or RAO/WI bits */
+ return MEMTX_OK;
+ case GICR_ICFGR1:
+ {
+ uint32_t mask;
+
+ /* Since our edge_trigger bitmap is one bit per irq, the 32 bits
+ * of input compress down into the 16 bits which we need
+ * to write into the bitmap.
+ */
+ value = half_unshuffle32(value >> 1) << 16;
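+ /* For example, a guest write of 0xaa (irqs 16..19 edge-triggered)
+ * compresses to 0xf and lands in edge_trigger bits 16..19.
+ */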
+ mask = mask_group(cs, attrs) & 0xffff0000U;
+
+ cs->edge_trigger &= ~mask;
+ cs->edge_trigger |= (value & mask);
+
+ gicv3_redist_update(cs);
+ return MEMTX_OK;
+ }
+ case GICR_IGRPMODR0:
+ if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
+ /* RAZ/WI if security disabled, or if
+ * security enabled and this is an NS access
+ */
+ return MEMTX_OK;
+ }
+ cs->gicr_igrpmodr0 = value;
+ gicv3_redist_update(cs);
+ return MEMTX_OK;
+ case GICR_NSACR:
+ if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
+ /* RAZ/WI if security disabled, or if
+ * security enabled and this is an NS access
+ */
+ return MEMTX_OK;
+ }
+ cs->gicr_nsacr = value;
+ /* no update required as this only affects access permission checks */
+ return MEMTX_OK;
+ case GICR_IIDR:
+ case GICR_TYPER:
+ case GICR_IDREGS ... GICR_IDREGS + 0x2f:
+ /* RO registers, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ return MEMTX_OK;
+ default:
+ return MEMTX_ERROR;
+ }
+}
+
+static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case GICR_TYPER:
+ *data = cs->gicr_typer;
+ return MEMTX_OK;
+ case GICR_PROPBASER:
+ *data = cs->gicr_propbaser;
+ return MEMTX_OK;
+ case GICR_PENDBASER:
+ *data = cs->gicr_pendbaser;
+ return MEMTX_OK;
+ default:
+ return MEMTX_ERROR;
+ }
+}
+
+static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+ switch (offset) {
+ case GICR_PROPBASER:
+ cs->gicr_propbaser = value;
+ return MEMTX_OK;
+ case GICR_PENDBASER:
+ cs->gicr_pendbaser = value;
+ return MEMTX_OK;
+ case GICR_TYPER:
+ /* RO register, ignore the write */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write to RO register at offset "
+ TARGET_FMT_plx "\n", __func__, offset);
+ return MEMTX_OK;
+ default:
+ return MEMTX_ERROR;
+ }
+}
+
+MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICv3RedistRegion *region = opaque;
+ GICv3State *s = region->gic;
+ GICv3CPUState *cs;
+ MemTxResult r;
+ int cpuidx;
+
+ assert((offset & (size - 1)) == 0);
+
+ /*
+ * There are (for GICv3) two 64K redistributor pages per CPU.
+ * In some cases the redistributor pages for all CPUs are not
+ * contiguous (eg on the virt board they are split into two
+ * parts if there are too many CPUs to all fit in the same place
+ * in the memory map); if so then the GIC has multiple MemoryRegions
+ * for the redistributors.
+ */
+ cpuidx = region->cpuidx + offset / GICV3_REDIST_SIZE;
+ offset %= GICV3_REDIST_SIZE;
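+ /* For example, assuming GICV3_REDIST_SIZE is 0x20000 (two 64K
+ * frames), an access at offset 0x45000 into a region whose first
+ * CPU is cpuidx 4 targets cpu 6 at redistributor offset 0x5000.
+ */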
+
+ cs = &s->cpu[cpuidx];
+
+ switch (size) {
+ case 1:
+ r = gicr_readb(cs, offset, data, attrs);
+ break;
+ case 4:
+ r = gicr_readl(cs, offset, data, attrs);
+ break;
+ case 8:
+ r = gicr_readll(cs, offset, data, attrs);
+ break;
+ default:
+ r = MEMTX_ERROR;
+ break;
+ }
+
+ if (r == MEMTX_ERROR) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest read at offset " TARGET_FMT_plx
+ " size %u\n", __func__, offset, size);
+ trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
+ size, attrs.secure);
+ /* The spec requires that reserved registers are RAZ/WI;
+ * so use MEMTX_ERROR returns from leaf functions as a way to
+ * trigger the guest-error logging but don't return it to
+ * the caller, or we'll cause a spurious guest data abort.
+ */
+ r = MEMTX_OK;
+ *data = 0;
+ } else {
+ trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
+ size, attrs.secure);
+ }
+ return r;
+}
+
+MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICv3RedistRegion *region = opaque;
+ GICv3State *s = region->gic;
+ GICv3CPUState *cs;
+ MemTxResult r;
+ int cpuidx;
+
+ assert((offset & (size - 1)) == 0);
+
+ /*
+ * There are (for GICv3) two 64K redistributor pages per CPU.
+ * In some cases the redistributor pages for all CPUs are not
+ * contiguous (eg on the virt board they are split into two
+ * parts if there are too many CPUs to all fit in the same place
+ * in the memory map); if so then the GIC has multiple MemoryRegions
+ * for the redistributors.
+ */
+ cpuidx = region->cpuidx + offset / GICV3_REDIST_SIZE;
+ offset %= GICV3_REDIST_SIZE;
+
+ cs = &s->cpu[cpuidx];
+
+ switch (size) {
+ case 1:
+ r = gicr_writeb(cs, offset, data, attrs);
+ break;
+ case 4:
+ r = gicr_writel(cs, offset, data, attrs);
+ break;
+ case 8:
+ r = gicr_writell(cs, offset, data, attrs);
+ break;
+ default:
+ r = MEMTX_ERROR;
+ break;
+ }
+
+ if (r == MEMTX_ERROR) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write at offset " TARGET_FMT_plx
+ " size %u\n", __func__, offset, size);
+ trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
+ size, attrs.secure);
+ /* The spec requires that reserved registers are RAZ/WI;
+ * so use MEMTX_ERROR returns from leaf functions as a way to
+ * trigger the guest-error logging but don't return it to
+ * the caller, or we'll cause a spurious guest data abort.
+ */
+ r = MEMTX_OK;
+ } else {
+ trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
+ size, attrs.secure);
+ }
+ return r;
+}
+
+static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
+{
+ AddressSpace *as = &cs->gic->dma_as;
+ uint64_t lpict_baddr;
+ uint8_t lpite;
+ uint8_t prio;
+
+ lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;
+
+ address_space_read(as, lpict_baddr + ((irq - GICV3_LPI_INTID_START) *
+ sizeof(lpite)), MEMTXATTRS_UNSPECIFIED, &lpite,
+ sizeof(lpite));
+
+ if (!(lpite & LPI_CTE_ENABLED)) {
+ return;
+ }
+
+ if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
+ prio = lpite & LPI_PRIORITY_MASK;
+ } else {
+ prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
+ }
+
+ if ((prio < cs->hpplpi.prio) ||
+ ((prio == cs->hpplpi.prio) && (irq <= cs->hpplpi.irq))) {
+ cs->hpplpi.irq = irq;
+ cs->hpplpi.prio = prio;
+ /* LPIs are always non-secure Grp1 interrupts */
+ cs->hpplpi.grp = GICV3_G1NS;
+ }
+}
+
+void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
+{
+ /*
+ * This function scans the LPI pending table and, for each pending
+ * LPI, reads the corresponding entry from the LPI configuration table
+ * to extract the priority and determine whether this LPI has a higher
+ * priority (a lower priority value) than the current highest priority
+ * pending LPI. If so, it is recorded as the new highest priority LPI.
+ */
+ AddressSpace *as = &cs->gic->dma_as;
+ uint64_t lpipt_baddr;
+ uint32_t pendt_size = 0;
+ uint8_t pend;
+ int i, bit;
+ uint64_t idbits;
+
+ idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
+ GICD_TYPER_IDBITS);
+
+ if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
+ !cs->gicr_pendbaser) {
+ return;
+ }
+
+ cs->hpplpi.prio = 0xff;
+
+ lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
+
+ /* Determine the highest priority pending interrupt among LPIs */
+ pendt_size = (1ULL << (idbits + 1));
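+ /* For example, with the maximum GICD_TYPER_IDBITS of 15 this gives
+ * a pendt_size of 65536 bits, so the loop below scans bytes 1024
+ * to 8191 of the table (LPI INTIDs start at 8192).
+ */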
+
+ for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
+ address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend,
+ sizeof(pend));
+
+ while (pend) {
+ bit = ctz32(pend);
+ gicv3_redist_check_lpi_priority(cs, i * 8 + bit);
+ pend &= ~(1 << bit);
+ }
+ }
+}
+
+void gicv3_redist_update_lpi(GICv3CPUState *cs)
+{
+ gicv3_redist_update_lpi_only(cs);
+ gicv3_redist_update(cs);
+}
+
+void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
+{
+ /*
+ * This function updates the pending bit in the LPI pending table for
+ * the irq being activated or deactivated.
+ */
+ AddressSpace *as = &cs->gic->dma_as;
+ uint64_t lpipt_baddr;
+ bool ispend = false;
+ uint8_t pend;
+
+ /*
+ * get the bit value corresponding to this irq in the
+ * lpi pending table
+ */
+ lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
+
+ address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
+ MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));
+
+ ispend = extract32(pend, irq % 8, 1);
+
+ /* no change in the value of pending bit, return */
+ if (ispend == level) {
+ return;
+ }
+ pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
+
+ address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
+ MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));
+
+ /*
+ * check if this LPI is better than the current hpplpi, if yes
+ * just set hpplpi.prio and .irq without doing a full rescan
+ */
+ if (level) {
+ gicv3_redist_check_lpi_priority(cs, irq);
+ gicv3_redist_update(cs);
+ } else {
+ if (irq == cs->hpplpi.irq) {
+ gicv3_redist_update_lpi(cs);
+ }
+ }
+}
+
+void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
+{
+ uint64_t idbits;
+
+ idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
+ GICD_TYPER_IDBITS);
+
+ if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
+ !cs->gicr_pendbaser || (irq > (1ULL << (idbits + 1)) - 1) ||
+ irq < GICV3_LPI_INTID_START) {
+ return;
+ }
+
+ /* set/clear the pending bit for this irq */
+ gicv3_redist_lpi_pending(cs, irq, level);
+}
+
+void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
+{
+ /* Update redistributor state for a change in an external PPI input line */
+ if (level == extract32(cs->level, irq, 1)) {
+ return;
+ }
+
+ trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);
+
+ cs->level = deposit32(cs->level, irq, 1, level);
+
+ if (level) {
+ /* 0->1 edges latch the pending bit for edge-triggered interrupts */
+ if (extract32(cs->edge_trigger, irq, 1)) {
+ cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
+ }
+ }
+
+ gicv3_redist_update(cs);
+}
+
+void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
+{
+ /* Update redistributor state for a generated SGI */
+ int irqgrp = gicv3_irq_group(cs->gic, cs, irq);
+
+ /* If we are asked for a Secure Group 1 SGI and it's actually
+ * configured as Secure Group 0 this is OK (subject to the usual
+ * NSACR checks).
+ */
+ if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
+ grp = GICV3_G0;
+ }
+
+ if (grp != irqgrp) {
+ return;
+ }
+
+ if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
+ /* If security is enabled we must test the NSACR bits */
+ int nsaccess = gicr_ns_access(cs, irq);
+
+ if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
+ (irqgrp == GICV3_G1 && nsaccess < 2)) {
+ return;
+ }
+ }
+
+ /* OK, we can accept the SGI */
+ trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
+ cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
+ gicv3_redist_update(cs);
+}
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
new file mode 100644
index 000000000..13df002ce
--- /dev/null
+++ b/hw/intc/armv7m_nvic.c
@@ -0,0 +1,2735 @@
+/*
+ * ARM Nested Vectored Interrupt Controller
+ *
+ * Copyright (c) 2006-2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ *
+ * The ARMv7M System controller is fairly tightly tied in with the
+ * NVIC. Much of that is also implemented here.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/timer.h"
+#include "hw/intc/armv7m_nvic.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/runstate.h"
+#include "target/arm/cpu.h"
+#include "exec/exec-all.h"
+#include "exec/memop.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+
+/* IRQ number counting:
+ *
+ * the num-irq property counts the number of external IRQ lines
+ *
+ * NVICState::num_irq counts the total number of exceptions
+ * (external IRQs, the 15 internal exceptions including reset,
+ * and one for the unused exception number 0).
+ *
+ * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
+ *
+ * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
+ *
+ * Iterating through all exceptions should typically be done with
+ * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
+ *
+ * The external qemu_irq lines are the NVIC's external IRQ lines,
+ * so line 0 is exception 16.
+ *
+ * In the terminology of the architecture manual, "interrupts" are
+ * a subcategory of exception referring to the external interrupts
+ * (which are exception numbers NVIC_FIRST_IRQ and upward).
+ * For historical reasons QEMU tends to use "interrupt" and
+ * "exception" more or less interchangeably.
+ */
+#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
+#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
+
+/* Effective running priority of the CPU when no exception is active
+ * (higher than the highest possible priority value)
+ */
+#define NVIC_NOEXC_PRIO 0x100
+/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
+#define NVIC_NS_PRIO_LIMIT 0x80
+
+static const uint8_t nvic_id[] = {
+ 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
+};
+
+static void signal_sysresetreq(NVICState *s)
+{
+ if (qemu_irq_is_connected(s->sysresetreq)) {
+ qemu_irq_pulse(s->sysresetreq);
+ } else {
+ /*
+ * Default behaviour if the SoC doesn't need to wire up
+ * SYSRESETREQ (eg to a system reset controller of some kind):
+ * perform a system reset via the usual QEMU API.
+ */
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ }
+}
+
+static int nvic_pending_prio(NVICState *s)
+{
+ /* return the group priority of the current pending interrupt,
+ * or NVIC_NOEXC_PRIO if no interrupt is pending
+ */
+ return s->vectpending_prio;
+}
+
+/* Return the value of the ICSR RETTOBASE bit:
+ * 1 if there is exactly one active exception
+ * 0 if there is more than one active exception
+ * UNKNOWN if there are no active exceptions (we choose 1,
+ * which matches the choice Cortex-M3 is documented as making).
+ *
+ * NB: some versions of the documentation talk about this
+ * counting "active exceptions other than the one shown by IPSR";
+ * this is only different in the obscure corner case where guest
+ * code has manually deactivated an exception and is about
+ * to fail an exception-return integrity check. The definition
+ * above is the one from the v8M ARM ARM and is also in line
+ * with the behaviour documented for the Cortex-M3.
+ */
+static bool nvic_rettobase(NVICState *s)
+{
+ int irq, nhand = 0;
+ bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
+
+ for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
+ if (s->vectors[irq].active ||
+ (check_sec && irq < NVIC_INTERNAL_VECTORS &&
+ s->sec_vectors[irq].active)) {
+ nhand++;
+ if (nhand == 2) {
+ return 0;
+ }
+ }
+ }
+
+ return 1;
+}
+
+/* Return the value of the ICSR ISRPENDING bit:
+ * 1 if an external interrupt is pending
+ * 0 if no external interrupt is pending
+ */
+static bool nvic_isrpending(NVICState *s)
+{
+ int irq;
+
+ /*
+ * We can shortcut if the highest priority pending interrupt
+ * happens to be external; if not we need to check the whole
+ * vectors[] array.
+ */
+ if (s->vectpending > NVIC_FIRST_IRQ) {
+ return true;
+ }
+
+ for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
+ if (s->vectors[irq].pending) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool exc_is_banked(int exc)
+{
+ /* Return true if this is one of the limited set of exceptions which
+ * are banked (and thus have state in sec_vectors[])
+ */
+ return exc == ARMV7M_EXCP_HARD ||
+ exc == ARMV7M_EXCP_MEM ||
+ exc == ARMV7M_EXCP_USAGE ||
+ exc == ARMV7M_EXCP_SVC ||
+ exc == ARMV7M_EXCP_PENDSV ||
+ exc == ARMV7M_EXCP_SYSTICK;
+}
+
+/* Return a mask word which clears the subpriority bits from
+ * a priority value for an M-profile exception, leaving only
+ * the group priority.
+ */
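+/* For example, with prigroup == 5 this returns ~0U << 6 == 0xffffffc0,
+ * so only bits [7:6] of an 8-bit priority value form the group priority.
+ */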
+static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
+{
+ return ~0U << (s->prigroup[secure] + 1);
+}
+
+static bool exc_targets_secure(NVICState *s, int exc)
+{
+ /* Return true if this non-banked exception targets Secure state. */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
+ return false;
+ }
+
+ if (exc >= NVIC_FIRST_IRQ) {
+ return !s->itns[exc];
+ }
+
+ /* Function shouldn't be called for banked exceptions. */
+ assert(!exc_is_banked(exc));
+
+ switch (exc) {
+ case ARMV7M_EXCP_NMI:
+ case ARMV7M_EXCP_BUS:
+ return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
+ case ARMV7M_EXCP_SECURE:
+ return true;
+ case ARMV7M_EXCP_DEBUG:
+ /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
+ return false;
+ default:
+ /* reset, and reserved (unused) low exception numbers.
+ * We'll get called by code that loops through all the exception
+ * numbers, but it doesn't matter what we return here as these
+ * non-existent exceptions will never be pended or active.
+ */
+ return true;
+ }
+}
+
+static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
+{
+ /* Return the group priority for this exception, given its raw
+ * (group-and-subgroup) priority value and whether it is targeting
+ * secure state or not.
+ */
+ if (rawprio < 0) {
+ return rawprio;
+ }
+ rawprio &= nvic_gprio_mask(s, targets_secure);
+ /* AIRCR.PRIS causes us to squash all NS priorities into the
+ * lower half of the total range
+ */
+ if (!targets_secure &&
+ (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
+ rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
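+ /* For example, an NS group priority of 0x40 is remapped to
+ * 0xa0, so every NS priority ends up in the 0x80..0xff half
+ * of the range.
+ */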
+ }
+ return rawprio;
+}
+
+/* Recompute vectpending and exception_prio for a CPU which implements
+ * the Security extension
+ */
+static void nvic_recompute_state_secure(NVICState *s)
+{
+ int i, bank;
+ int pend_prio = NVIC_NOEXC_PRIO;
+ int active_prio = NVIC_NOEXC_PRIO;
+ int pend_irq = 0;
+ bool pending_is_s_banked = false;
+ int pend_subprio = 0;
+
+ /* R_CQRV: precedence is by:
+ * - lowest group priority; if both the same then
+ * - lowest subpriority; if both the same then
+ * - lowest exception number; if both the same (ie banked) then
+ * - secure exception takes precedence
+ * Compare pseudocode RawExecutionPriority.
+ * Annoyingly, now we have two prigroup values (for S and NS)
+ * we can't do the loop comparison on raw priority values.
+ */
+ for (i = 1; i < s->num_irq; i++) {
+ for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
+ VecInfo *vec;
+ int prio, subprio;
+ bool targets_secure;
+
+ if (bank == M_REG_S) {
+ if (!exc_is_banked(i)) {
+ continue;
+ }
+ vec = &s->sec_vectors[i];
+ targets_secure = true;
+ } else {
+ vec = &s->vectors[i];
+ targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
+ }
+
+ prio = exc_group_prio(s, vec->prio, targets_secure);
+ subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
+ if (vec->enabled && vec->pending &&
+ ((prio < pend_prio) ||
+ (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
+ pend_prio = prio;
+ pend_subprio = subprio;
+ pend_irq = i;
+ pending_is_s_banked = (bank == M_REG_S);
+ }
+ if (vec->active && prio < active_prio) {
+ active_prio = prio;
+ }
+ }
+ }
+
+ s->vectpending_is_s_banked = pending_is_s_banked;
+ s->vectpending = pend_irq;
+ s->vectpending_prio = pend_prio;
+ s->exception_prio = active_prio;
+
+ trace_nvic_recompute_state_secure(s->vectpending,
+ s->vectpending_is_s_banked,
+ s->vectpending_prio,
+ s->exception_prio);
+}
+
+/* Recompute vectpending and exception_prio */
+static void nvic_recompute_state(NVICState *s)
+{
+ int i;
+ int pend_prio = NVIC_NOEXC_PRIO;
+ int active_prio = NVIC_NOEXC_PRIO;
+ int pend_irq = 0;
+
+ /* In theory we could write one function that handled both
+ * the "security extension present" and "not present"; however
+ * the security related changes significantly complicate the
+ * recomputation just by themselves and mixing both cases together
+ * would be even worse, so we retain a separate non-secure-only
+ * version for CPUs which don't implement the security extension.
+ */
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
+ nvic_recompute_state_secure(s);
+ return;
+ }
+
+ for (i = 1; i < s->num_irq; i++) {
+ VecInfo *vec = &s->vectors[i];
+
+ if (vec->enabled && vec->pending && vec->prio < pend_prio) {
+ pend_prio = vec->prio;
+ pend_irq = i;
+ }
+ if (vec->active && vec->prio < active_prio) {
+ active_prio = vec->prio;
+ }
+ }
+
+ if (active_prio > 0) {
+ active_prio &= nvic_gprio_mask(s, false);
+ }
+
+ if (pend_prio > 0) {
+ pend_prio &= nvic_gprio_mask(s, false);
+ }
+
+ s->vectpending = pend_irq;
+ s->vectpending_prio = pend_prio;
+ s->exception_prio = active_prio;
+
+ trace_nvic_recompute_state(s->vectpending,
+ s->vectpending_prio,
+ s->exception_prio);
+}
+
+/* Return the current execution priority of the CPU
+ * (equivalent to the pseudocode ExecutionPriority function).
+ * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO.
+ */
+static inline int nvic_exec_prio(NVICState *s)
+{
+ CPUARMState *env = &s->cpu->env;
+ int running = NVIC_NOEXC_PRIO;
+
+ if (env->v7m.basepri[M_REG_NS] > 0) {
+ running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
+ }
+
+ if (env->v7m.basepri[M_REG_S] > 0) {
+ int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
+ if (running > basepri) {
+ running = basepri;
+ }
+ }
+
+ if (env->v7m.primask[M_REG_NS]) {
+ if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
+ if (running > NVIC_NS_PRIO_LIMIT) {
+ running = NVIC_NS_PRIO_LIMIT;
+ }
+ } else {
+ running = 0;
+ }
+ }
+
+ if (env->v7m.primask[M_REG_S]) {
+ running = 0;
+ }
+
+ if (env->v7m.faultmask[M_REG_NS]) {
+ if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+ running = -1;
+ } else {
+ if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
+ if (running > NVIC_NS_PRIO_LIMIT) {
+ running = NVIC_NS_PRIO_LIMIT;
+ }
+ } else {
+ running = 0;
+ }
+ }
+ }
+
+ if (env->v7m.faultmask[M_REG_S]) {
+ running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
+ }
+
+ /* consider priority of active handler */
+ return MIN(running, s->exception_prio);
+}
+
+bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
+{
+ /* Return true if the requested execution priority is negative
+ * for the specified security state, ie that security state
+ * has an active NMI or HardFault or has set its FAULTMASK.
+ * Note that this is not the same as whether the execution
+ * priority is actually negative (for instance AIRCR.PRIS may
+ * mean we don't allow FAULTMASK_NS to actually make the execution
+ * priority negative). Compare pseudocode IsReqExcPriNeg().
+ */
+ NVICState *s = opaque;
+
+ if (s->cpu->env.v7m.faultmask[secure]) {
+ return true;
+ }
+
+ if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
+ s->vectors[ARMV7M_EXCP_HARD].active) {
+ return true;
+ }
+
+ if (s->vectors[ARMV7M_EXCP_NMI].active &&
+ exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
+ return true;
+ }
+
+ return false;
+}
+
+bool armv7m_nvic_can_take_pending_exception(void *opaque)
+{
+ NVICState *s = opaque;
+
+ return nvic_exec_prio(s) > nvic_pending_prio(s);
+}
+
+int armv7m_nvic_raw_execution_priority(void *opaque)
+{
+ NVICState *s = opaque;
+
+ return s->exception_prio;
+}
+
+/* caller must call nvic_irq_update() after this.
+ * secure indicates the bank to use for banked exceptions (we assert if
+ * we are passed secure=true for a non-banked exception).
+ */
+static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
+{
+ assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
+ assert(irq < s->num_irq);
+
+ prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);
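+ /* For example, with num_prio_bits == 3 only bits [7:5] are kept,
+ * so a write of 0x2f is stored as 0x20.
+ */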
+
+ if (secure) {
+ assert(exc_is_banked(irq));
+ s->sec_vectors[irq].prio = prio;
+ } else {
+ s->vectors[irq].prio = prio;
+ }
+
+ trace_nvic_set_prio(irq, secure, prio);
+}
+
+/* Return the current raw priority register value.
+ * secure indicates the bank to use for banked exceptions (we assert if
+ * we are passed secure=true for a non-banked exception).
+ */
+static int get_prio(NVICState *s, unsigned irq, bool secure)
+{
+ assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
+ assert(irq < s->num_irq);
+
+ if (secure) {
+ assert(exc_is_banked(irq));
+ return s->sec_vectors[irq].prio;
+ } else {
+ return s->vectors[irq].prio;
+ }
+}
+
+/* Recompute state and assert irq line accordingly.
+ * Must be called after changes to:
+ * vec->active, vec->enabled, vec->pending or vec->prio for any vector
+ * prigroup
+ */
+static void nvic_irq_update(NVICState *s)
+{
+ int lvl;
+ int pend_prio;
+
+ nvic_recompute_state(s);
+ pend_prio = nvic_pending_prio(s);
+
+ /* Raise NVIC output if this IRQ would be taken, except that we
+ * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
+ * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
+ * to those CPU registers don't cause us to recalculate the NVIC
+ * pending info.
+ */
+ lvl = (pend_prio < s->exception_prio);
+ trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
+ qemu_set_irq(s->excpout, lvl);
+}
+
+/**
+ * armv7m_nvic_clear_pending: mark the specified exception as not pending
+ * @opaque: the NVIC
+ * @irq: the exception number to mark as not pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Marks the specified exception as not pending. Note that we will assert()
+ * if @secure is true and @irq does not specify one of the fixed set
+ * of architecturally banked exceptions.
+ */
+static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
+{
+ NVICState *s = (NVICState *)opaque;
+ VecInfo *vec;
+
+ assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
+
+ if (secure) {
+ assert(exc_is_banked(irq));
+ vec = &s->sec_vectors[irq];
+ } else {
+ vec = &s->vectors[irq];
+ }
+ trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
+ if (vec->pending) {
+ vec->pending = 0;
+ nvic_irq_update(s);
+ }
+}
+
+static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
+ bool derived)
+{
+ /* Pend an exception, including possibly escalating it to HardFault.
+ *
+ * This function handles both "normal" pending of interrupts and
+ * exceptions, and also derived exceptions (ones which occur as
+ * a result of trying to take some other exception).
+ *
+ * If derived == true, the caller guarantees that we are part way through
+ * trying to take an exception (but have not yet called
+ * armv7m_nvic_acknowledge_irq() to make it active), and so:
+ * - s->vectpending is the "original exception" we were trying to take
+ * - irq is the "derived exception"
+ * - nvic_exec_prio(s) gives the priority before exception entry
+ * Here we handle the prioritization logic which the pseudocode puts
+ * in the DerivedLateArrival() function.
+ */
+
+ NVICState *s = (NVICState *)opaque;
+ bool banked = exc_is_banked(irq);
+ VecInfo *vec;
+ bool targets_secure;
+
+ assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
+ assert(!secure || banked);
+
+ vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
+
+ targets_secure = banked ? secure : exc_targets_secure(s, irq);
+
+ trace_nvic_set_pending(irq, secure, targets_secure,
+ derived, vec->enabled, vec->prio);
+
+ if (derived) {
+ /* Derived exceptions are always synchronous. */
+ assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);
+
+ if (irq == ARMV7M_EXCP_DEBUG &&
+ exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
+ /* DebugMonitorFault, but its priority is lower than the
+ * preempted exception priority: just ignore it.
+ */
+ return;
+ }
+
+ if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
+ /* If this is a terminal exception (one which means we cannot
+ * take the original exception, like a failure to read its
+ * vector table entry), then we must take the derived exception.
+ * If the derived exception can't take priority over the
+ * original exception, then we go into Lockup.
+ *
+ * For QEMU, we rely on the fact that a derived exception is
+ * terminal if and only if it's reported to us as HardFault,
+ * which saves having to have an extra argument is_terminal
+ * that we'd only use in one place.
+ */
+ cpu_abort(&s->cpu->parent_obj,
+ "Lockup: can't take terminal derived exception "
+ "(original exception priority %d)\n",
+ s->vectpending_prio);
+ }
+ /* We now continue with the same code as for a normal pending
+ * exception, which will cause us to pend the derived exception.
+ * We'll then take either the original or the derived exception
+ * based on which is higher priority by the usual mechanism
+ * for selecting the highest priority pending interrupt.
+ */
+ }
+
+ if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
+ /* If a synchronous exception is pending then it may be
+ * escalated to HardFault if:
+ * * it is of equal or lower priority than the current execution priority
+ * * it is disabled
+ * (ie we need to take it immediately but we can't do so).
+ * Asynchronous exceptions (and interrupts) simply remain pending.
+ *
+ * For QEMU, we don't have any imprecise (asynchronous) faults,
+ * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
+ * synchronous.
+ * Debug exceptions are awkward because only Debug exceptions
+ * resulting from the BKPT instruction should be escalated,
+ * but we don't currently implement any Debug exceptions other
+ * than those that result from BKPT, so we treat all debug exceptions
+ * as needing escalation.
+ *
+ * This all means we can identify whether to escalate based only on
+ * the exception number and don't (yet) need the caller to explicitly
+ * tell us whether this exception is synchronous or not.
+ */
+ int running = nvic_exec_prio(s);
+ bool escalate = false;
+
+ if (exc_group_prio(s, vec->prio, secure) >= running) {
+ trace_nvic_escalate_prio(irq, vec->prio, running);
+ escalate = true;
+ } else if (!vec->enabled) {
+ trace_nvic_escalate_disabled(irq);
+ escalate = true;
+ }
+
+ if (escalate) {
+
+ /* We need to escalate this exception to a synchronous HardFault.
+ * If BFHFNMINS is set then we escalate to the banked HF for
+ * the target security state of the original exception; otherwise
+ * we take a Secure HardFault.
+ */
+ irq = ARMV7M_EXCP_HARD;
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
+ (targets_secure ||
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
+ vec = &s->sec_vectors[irq];
+ } else {
+ vec = &s->vectors[irq];
+ }
+ if (running <= vec->prio) {
+ /* We want to escalate to HardFault but we can't take the
+ * synchronous HardFault at this point either. This is a
+ * Lockup condition due to a guest bug. We don't model
+ * Lockup, so report via cpu_abort() instead.
+ */
+ cpu_abort(&s->cpu->parent_obj,
+ "Lockup: can't escalate %d to HardFault "
+ "(current priority %d)\n", irq, running);
+ }
+
+ /* HF may be banked but there is only one shared HFSR */
+ s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
+ }
+ }
+
+ if (!vec->pending) {
+ vec->pending = 1;
+ nvic_irq_update(s);
+ }
+}
+
+void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
+{
+ do_armv7m_nvic_set_pending(opaque, irq, secure, false);
+}
+
+void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
+{
+ do_armv7m_nvic_set_pending(opaque, irq, secure, true);
+}
+
+void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
+{
+ /*
+ * Pend an exception during lazy FP stacking. This differs
+ * from the usual exception pending because the logic for
+ * whether we should escalate depends on the saved context
+ * in the FPCCR register, not on the current state of the CPU/NVIC.
+ */
+ NVICState *s = (NVICState *)opaque;
+ bool banked = exc_is_banked(irq);
+ VecInfo *vec;
+ bool targets_secure;
+ bool escalate = false;
+ /*
+ * We will only look at bits in fpccr if this is a banked exception
+ * (in which case 'secure' tells us whether it is the S or NS version).
+ * All the bits for the non-banked exceptions are in fpccr_s.
+ */
+ uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
+ uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];
+
+ assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
+ assert(!secure || banked);
+
+ vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
+
+ targets_secure = banked ? secure : exc_targets_secure(s, irq);
+
+ switch (irq) {
+ case ARMV7M_EXCP_DEBUG:
+ if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
+ /* Ignore DebugMonitor exception */
+ return;
+ }
+ break;
+ case ARMV7M_EXCP_MEM:
+ escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
+ break;
+ case ARMV7M_EXCP_USAGE:
+ escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
+ break;
+ case ARMV7M_EXCP_BUS:
+ escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
+ break;
+ case ARMV7M_EXCP_SECURE:
+ escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (escalate) {
+ /*
+ * Escalate to HardFault: faults that initially targeted Secure
+ * continue to do so, even if HF normally targets NonSecure.
+ */
+ irq = ARMV7M_EXCP_HARD;
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
+ (targets_secure ||
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
+ vec = &s->sec_vectors[irq];
+ } else {
+ vec = &s->vectors[irq];
+ }
+ }
+
+ if (!vec->enabled ||
+ nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
+ if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
+ /*
+ * We want to escalate to HardFault but the context the
+ * FP state belongs to prevents the exception pre-empting.
+ */
+ cpu_abort(&s->cpu->parent_obj,
+ "Lockup: can't escalate to HardFault during "
+ "lazy FP register stacking\n");
+ }
+ }
+
+ if (escalate) {
+ s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
+ }
+ if (!vec->pending) {
+ vec->pending = 1;
+ /*
+ * We do not call nvic_irq_update(), because we know our caller
+ * is going to handle causing us to take the exception by
+ * raising EXCP_LAZYFP, so raising the IRQ line would be
+ * pointless extra work. We just need to recompute the
+ * priorities so that armv7m_nvic_can_take_pending_exception()
+ * returns the right answer.
+ */
+ nvic_recompute_state(s);
+ }
+}
+
+/* Make pending IRQ active. */
+void armv7m_nvic_acknowledge_irq(void *opaque)
+{
+ NVICState *s = (NVICState *)opaque;
+ CPUARMState *env = &s->cpu->env;
+ const int pending = s->vectpending;
+ const int running = nvic_exec_prio(s);
+ VecInfo *vec;
+
+ assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
+
+ if (s->vectpending_is_s_banked) {
+ vec = &s->sec_vectors[pending];
+ } else {
+ vec = &s->vectors[pending];
+ }
+
+ assert(vec->enabled);
+ assert(vec->pending);
+
+ assert(s->vectpending_prio < running);
+
+ trace_nvic_acknowledge_irq(pending, s->vectpending_prio);
+
+ vec->active = 1;
+ vec->pending = 0;
+
+ write_v7m_exception(env, s->vectpending);
+
+ nvic_irq_update(s);
+}
+
+static bool vectpending_targets_secure(NVICState *s)
+{
+ /* Return true if s->vectpending targets Secure state */
+ if (s->vectpending_is_s_banked) {
+ return true;
+ }
+ return !exc_is_banked(s->vectpending) &&
+ exc_targets_secure(s, s->vectpending);
+}
+
+void armv7m_nvic_get_pending_irq_info(void *opaque,
+ int *pirq, bool *ptargets_secure)
+{
+ NVICState *s = (NVICState *)opaque;
+ const int pending = s->vectpending;
+ bool targets_secure;
+
+ assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
+
+ targets_secure = vectpending_targets_secure(s);
+
+ trace_nvic_get_pending_irq_info(pending, targets_secure);
+
+ *ptargets_secure = targets_secure;
+ *pirq = pending;
+}
+
+int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
+{
+ NVICState *s = (NVICState *)opaque;
+ VecInfo *vec = NULL;
+ int ret = 0;
+
+ assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
+
+ trace_nvic_complete_irq(irq, secure);
+
+ if (secure && exc_is_banked(irq)) {
+ vec = &s->sec_vectors[irq];
+ } else {
+ vec = &s->vectors[irq];
+ }
+
+ /*
+ * Identify illegal exception return cases. We can't immediately
+ * return at this point because we still need to deactivate
+ * (either this exception or NMI/HardFault) first.
+ */
+ if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
+ /*
+ * Return from a configurable exception targeting the opposite
+ * security state from the one we're trying to complete it for.
+ * Clear vec because it's not really the VecInfo for this
+ * (irq, secstate) so we mustn't deactivate it.
+ */
+ ret = -1;
+ vec = NULL;
+ } else if (!vec->active) {
+ /* Return from an inactive interrupt */
+ ret = -1;
+ } else {
+ /* Legal return, we will return the RETTOBASE bit value to the caller */
+ ret = nvic_rettobase(s);
+ }
+
+ /*
+ * For negative priorities, v8M will forcibly deactivate the appropriate
+ * NMI or HardFault regardless of what interrupt we're being asked to
+ * deactivate (compare the DeActivate() pseudocode). This is a guard
+ * against software returning from NMI or HardFault with a corrupted
+ * IPSR and leaving the CPU in a negative-priority state.
+ * v7M does not do this, but simply deactivates the requested interrupt.
+ */
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
+ switch (armv7m_nvic_raw_execution_priority(s)) {
+ case -1:
+ if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+ vec = &s->vectors[ARMV7M_EXCP_HARD];
+ } else {
+ vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+ }
+ break;
+ case -2:
+ vec = &s->vectors[ARMV7M_EXCP_NMI];
+ break;
+ case -3:
+ vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!vec) {
+ return ret;
+ }
+
+ vec->active = 0;
+ if (vec->level) {
+ /* Re-pend the exception if it's still held high; this only
+ * happens for external IRQs
+ */
+ assert(irq >= NVIC_FIRST_IRQ);
+ vec->pending = 1;
+ }
+
+ nvic_irq_update(s);
+
+ return ret;
+}
+
+bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
+{
+ /*
+ * Return whether an exception is "ready", i.e. it is enabled and is
+ * configured at a priority which would allow it to interrupt the
+ * current execution priority.
+ *
+ * irq and secure have the same semantics as for armv7m_nvic_set_pending():
+ * for non-banked exceptions secure is always false; for banked exceptions
+ * it indicates which of the exceptions is required.
+ */
+ NVICState *s = (NVICState *)opaque;
+ bool banked = exc_is_banked(irq);
+ VecInfo *vec;
+ int running = nvic_exec_prio(s);
+
+ assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
+ assert(!secure || banked);
+
+ /*
+ * HardFault is an odd special case: we always check against -1,
+ * even if we're secure and HardFault has priority -3; we never
+ * need to check for enabled state.
+ */
+ if (irq == ARMV7M_EXCP_HARD) {
+ return running > -1;
+ }
+
+ vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
+
+ return vec->enabled &&
+ exc_group_prio(s, vec->prio, secure) < running;
+}
+
+/* callback when external interrupt line is changed */
+static void set_irq_level(void *opaque, int n, int level)
+{
+ NVICState *s = opaque;
+ VecInfo *vec;
+
+ n += NVIC_FIRST_IRQ;
+
+ assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
+
+ trace_nvic_set_irq_level(n, level);
+
+ /* The pending status of an external interrupt is
+ * latched on rising edge and exception handler return.
+ *
+ * Pulsing the IRQ will always run the handler
+ * once, and the handler will keep re-running as long as
+ * the level is still high when the handler completes.
+ */
+ vec = &s->vectors[n];
+ if (level != vec->level) {
+ vec->level = level;
+ if (level) {
+ armv7m_nvic_set_pending(s, n, false);
+ }
+ }
+}
+
+/* callback when external NMI line is changed */
+static void nvic_nmi_trigger(void *opaque, int n, int level)
+{
+ NVICState *s = opaque;
+
+ trace_nvic_set_nmi_level(level);
+
+ /*
+ * The architecture doesn't specify whether NMI should share
+ * the normal-interrupt behaviour of being resampled on
+ * exception handler return. We choose not to, so just
+ * set NMI pending here and don't track the current level.
+ */
+ if (level) {
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
+ }
+}
+
+static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
+{
+ ARMCPU *cpu = s->cpu;
+ uint32_t val;
+
+ switch (offset) {
+ case 4: /* Interrupt Control Type. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
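+ /* For example, an NVIC with 64 external IRQ lines returns 1 here */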
+ return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
+ case 0xc: /* CPPWR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ /* We make the IMPDEF choice that nothing can ever go into a
+ * non-retentive power state, which allows us to RAZ/WI this.
+ */
+ return 0;
+ case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
+ {
+ int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
+ int i;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return 0;
+ }
+ val = 0;
+ for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
+ if (s->itns[startvec + i]) {
+ val |= (1 << i);
+ }
+ }
+ return val;
+ }
+ case 0xcfc:
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) {
+ goto bad_offset;
+ }
+ return cpu->revidr;
+ case 0xd00: /* CPUID Base. */
+ return cpu->midr;
+ case 0xd04: /* Interrupt Control State (ICSR) */
+ /* VECTACTIVE */
+ val = cpu->env.v7m.exception;
+ /* VECTPENDING */
+ if (s->vectpending) {
+ /*
+ * From v8.1M VECTPENDING must read as 1 if accessed as
+ * NonSecure and the highest priority pending and enabled
+ * exception targets Secure.
+ */
+ int vp = s->vectpending;
+ if (!attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_V8_1M) &&
+ vectpending_targets_secure(s)) {
+ vp = 1;
+ }
+ val |= (vp & 0x1ff) << 12;
+ }
+ /* ISRPENDING - set if any external IRQ is pending */
+ if (nvic_isrpending(s)) {
+ val |= (1 << 22);
+ }
+ /* RETTOBASE - set if only one handler is active */
+ if (nvic_rettobase(s)) {
+ val |= (1 << 11);
+ }
+ if (attrs.secure) {
+ /* PENDSTSET */
+ if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
+ val |= (1 << 26);
+ }
+ /* PENDSVSET */
+ if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
+ val |= (1 << 28);
+ }
+ } else {
+ /* PENDSTSET */
+ if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
+ val |= (1 << 26);
+ }
+ /* PENDSVSET */
+ if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
+ val |= (1 << 28);
+ }
+ }
+ /* NMIPENDSET */
+ if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
+ && s->vectors[ARMV7M_EXCP_NMI].pending) {
+ val |= (1 << 31);
+ }
+ /* ISRPREEMPT: RES0 when halting debug not implemented */
+ /* STTNS: RES0 for the Main Extension */
+ return val;
+ case 0xd08: /* Vector Table Offset. */
+ return cpu->env.v7m.vecbase[attrs.secure];
+ case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
+ val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
+ if (attrs.secure) {
+ /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
+ val |= cpu->env.v7m.aircr;
+ } else {
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
+ * security isn't supported then BFHFNMINS is RAO (and
+ * the bit in env.v7m.aircr is always set).
+ */
+ val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
+ }
+ }
+ return val;
+ case 0xd10: /* System Control. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
+ return cpu->env.v7m.scr[attrs.secure];
+ case 0xd14: /* Configuration Control. */
+ /*
+ * Non-banked bits: BFHFNMIGN (stored in the NS copy of the register)
+ * and TRD (stored in the S copy of the register)
+ */
+ val = cpu->env.v7m.ccr[attrs.secure];
+ val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
+ /* BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0 */
+ if (!attrs.secure) {
+ if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ val &= ~R_V7M_CCR_BFHFNMIGN_MASK;
+ }
+ }
+ return val;
+ case 0xd24: /* System Handler Control and State (SHCSR) */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
+ val = 0;
+ if (attrs.secure) {
+ if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
+ val |= (1 << 0);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
+ val |= (1 << 2);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
+ val |= (1 << 3);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
+ val |= (1 << 7);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
+ val |= (1 << 10);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
+ val |= (1 << 11);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
+ val |= (1 << 12);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
+ val |= (1 << 13);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
+ val |= (1 << 15);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
+ val |= (1 << 16);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
+ val |= (1 << 18);
+ }
+ if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
+ val |= (1 << 21);
+ }
+ /* SecureFault is not banked but is always RAZ/WI to NS */
+ if (s->vectors[ARMV7M_EXCP_SECURE].active) {
+ val |= (1 << 4);
+ }
+ if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
+ val |= (1 << 19);
+ }
+ if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
+ val |= (1 << 20);
+ }
+ } else {
+ if (s->vectors[ARMV7M_EXCP_MEM].active) {
+ val |= (1 << 0);
+ }
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
+ if (s->vectors[ARMV7M_EXCP_HARD].active) {
+ val |= (1 << 2);
+ }
+ if (s->vectors[ARMV7M_EXCP_HARD].pending) {
+ val |= (1 << 21);
+ }
+ }
+ if (s->vectors[ARMV7M_EXCP_USAGE].active) {
+ val |= (1 << 3);
+ }
+ if (s->vectors[ARMV7M_EXCP_SVC].active) {
+ val |= (1 << 7);
+ }
+ if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
+ val |= (1 << 10);
+ }
+ if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
+ val |= (1 << 11);
+ }
+ if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
+ val |= (1 << 12);
+ }
+ if (s->vectors[ARMV7M_EXCP_MEM].pending) {
+ val |= (1 << 13);
+ }
+ if (s->vectors[ARMV7M_EXCP_SVC].pending) {
+ val |= (1 << 15);
+ }
+ if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
+ val |= (1 << 16);
+ }
+ if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
+ val |= (1 << 18);
+ }
+ }
+ if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ if (s->vectors[ARMV7M_EXCP_BUS].active) {
+ val |= (1 << 1);
+ }
+ if (s->vectors[ARMV7M_EXCP_BUS].pending) {
+ val |= (1 << 14);
+ }
+ if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
+ val |= (1 << 17);
+ }
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
+ s->vectors[ARMV7M_EXCP_NMI].active) {
+ /* NMIACT is not present in v7M */
+ val |= (1 << 5);
+ }
+ }
+
+ /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
+ if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
+ val |= (1 << 8);
+ }
+ return val;
+ case 0xd2c: /* Hard Fault Status. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->env.v7m.hfsr;
+ case 0xd30: /* Debug Fault Status. */
+ return cpu->env.v7m.dfsr;
+ case 0xd34: /* MMFAR MemManage Fault Address */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->env.v7m.mmfar[attrs.secure];
+ case 0xd38: /* Bus Fault Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure &&
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ return 0;
+ }
+ return cpu->env.v7m.bfar;
+ case 0xd3c: /* Aux Fault Status. */
+ /* TODO: Implement fault status registers. */
+ qemu_log_mask(LOG_UNIMP,
+ "Aux Fault status registers unimplemented\n");
+ return 0;
+ case 0xd40: /* PFR0. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_pfr0;
+ case 0xd44: /* PFR1. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_pfr1;
+ case 0xd48: /* DFR0. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_dfr0;
+ case 0xd4c: /* AFR0. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->id_afr0;
+ case 0xd50: /* MMFR0. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_mmfr0;
+ case 0xd54: /* MMFR1. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_mmfr1;
+ case 0xd58: /* MMFR2. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_mmfr2;
+ case 0xd5c: /* MMFR3. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_mmfr3;
+ case 0xd60: /* ISAR0. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_isar0;
+ case 0xd64: /* ISAR1. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_isar1;
+ case 0xd68: /* ISAR2. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_isar2;
+ case 0xd6c: /* ISAR3. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_isar3;
+ case 0xd70: /* ISAR4. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_isar4;
+ case 0xd74: /* ISAR5. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ return cpu->isar.id_isar5;
+ case 0xd78: /* CLIDR */
+ return cpu->clidr;
+ case 0xd7c: /* CTR */
+ return cpu->ctr;
+    case 0xd80: /* CCSIDR */
+ {
+ int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
+ return cpu->ccsidr[idx];
+ }
+ case 0xd84: /* CSSELR */
+ return cpu->env.v7m.csselr[attrs.secure];
+ case 0xd88: /* CPACR */
+ if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ return 0;
+ }
+ return cpu->env.v7m.cpacr[attrs.secure];
+ case 0xd8c: /* NSACR */
+ if (!attrs.secure || !cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ return 0;
+ }
+ return cpu->env.v7m.nsacr;
+ /* TODO: Implement debug registers. */
+ case 0xd90: /* MPU_TYPE */
+ /* Unified MPU; if the MPU is not present this value is zero */
+ return cpu->pmsav7_dregion << 8;
+ case 0xd94: /* MPU_CTRL */
+ return cpu->env.v7m.mpu_ctrl[attrs.secure];
+ case 0xd98: /* MPU_RNR */
+ return cpu->env.pmsav7.rnr[attrs.secure];
+ case 0xd9c: /* MPU_RBAR */
+ case 0xda4: /* MPU_RBAR_A1 */
+ case 0xdac: /* MPU_RBAR_A2 */
+ case 0xdb4: /* MPU_RBAR_A3 */
+ {
+ int region = cpu->env.pmsav7.rnr[attrs.secure];
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* PMSAv8M handling of the aliases is different from v7M:
+ * aliases A1, A2, A3 override the low two bits of the region
+ * number in MPU_RNR, and there is no 'region' field in the
+ * RBAR register.
+ */
+ int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
+ if (aliasno) {
+ region = deposit32(region, 0, 2, aliasno);
+ }
+ if (region >= cpu->pmsav7_dregion) {
+ return 0;
+ }
+ return cpu->env.pmsav8.rbar[attrs.secure][region];
+ }
+
+ if (region >= cpu->pmsav7_dregion) {
+ return 0;
+ }
+ return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
+ }
+ case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
+ case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
+ case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
+ case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
+ {
+ int region = cpu->env.pmsav7.rnr[attrs.secure];
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* PMSAv8M handling of the aliases is different from v7M:
+ * aliases A1, A2, A3 override the low two bits of the region
+ * number in MPU_RNR.
+ */
+ int aliasno = (offset - 0xda0) / 8; /* 0..3 */
+ if (aliasno) {
+ region = deposit32(region, 0, 2, aliasno);
+ }
+ if (region >= cpu->pmsav7_dregion) {
+ return 0;
+ }
+ return cpu->env.pmsav8.rlar[attrs.secure][region];
+ }
+
+ if (region >= cpu->pmsav7_dregion) {
+ return 0;
+ }
+ return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
+ (cpu->env.pmsav7.drsr[region] & 0xffff);
+ }
+ case 0xdc0: /* MPU_MAIR0 */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ return cpu->env.pmsav8.mair0[attrs.secure];
+ case 0xdc4: /* MPU_MAIR1 */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ return cpu->env.pmsav8.mair1[attrs.secure];
+ case 0xdd0: /* SAU_CTRL */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return 0;
+ }
+ return cpu->env.sau.ctrl;
+ case 0xdd4: /* SAU_TYPE */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return 0;
+ }
+ return cpu->sau_sregion;
+ case 0xdd8: /* SAU_RNR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return 0;
+ }
+ return cpu->env.sau.rnr;
+ case 0xddc: /* SAU_RBAR */
+ {
+ int region = cpu->env.sau.rnr;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return 0;
+ }
+ if (region >= cpu->sau_sregion) {
+ return 0;
+ }
+ return cpu->env.sau.rbar[region];
+ }
+ case 0xde0: /* SAU_RLAR */
+ {
+ int region = cpu->env.sau.rnr;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return 0;
+ }
+ if (region >= cpu->sau_sregion) {
+ return 0;
+ }
+ return cpu->env.sau.rlar[region];
+ }
+ case 0xde4: /* SFSR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return 0;
+ }
+ return cpu->env.v7m.sfsr;
+ case 0xde8: /* SFAR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return 0;
+ }
+ return cpu->env.v7m.sfar;
+ case 0xf04: /* RFSR */
+ if (!cpu_isar_feature(aa32_ras, cpu)) {
+ goto bad_offset;
+ }
+ /* We provide minimal-RAS only: RFSR is RAZ/WI */
+ return 0;
+ case 0xf34: /* FPCCR */
+ if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ return 0;
+ }
+ if (attrs.secure) {
+ return cpu->env.v7m.fpccr[M_REG_S];
+ } else {
+ /*
+ * NS can read LSPEN, CLRONRET and MONRDY. It can read
+ * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0;
+ * other non-banked bits RAZ.
+ * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set.
+ */
+ uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
+ uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
+ R_V7M_FPCCR_CLRONRET_MASK |
+ R_V7M_FPCCR_MONRDY_MASK;
+
+ if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+ mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
+ }
+
+ value &= mask;
+
+ value |= cpu->env.v7m.fpccr[M_REG_NS];
+ return value;
+ }
+ case 0xf38: /* FPCAR */
+ if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ return 0;
+ }
+ return cpu->env.v7m.fpcar[attrs.secure];
+ case 0xf3c: /* FPDSCR */
+ if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ return 0;
+ }
+ return cpu->env.v7m.fpdscr[attrs.secure];
+ case 0xf40: /* MVFR0 */
+ return cpu->isar.mvfr0;
+ case 0xf44: /* MVFR1 */
+ return cpu->isar.mvfr1;
+ case 0xf48: /* MVFR2 */
+ return cpu->isar.mvfr2;
+ default:
+ bad_offset:
+ qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
+ return 0;
+ }
+}
+
+static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
+ MemTxAttrs attrs)
+{
+ ARMCPU *cpu = s->cpu;
+
+ switch (offset) {
+ case 0xc: /* CPPWR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ /* Make the IMPDEF choice to RAZ/WI this. */
+ break;
+ case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
+ {
+ int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
+ int i;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ break;
+ }
+ for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
+ s->itns[startvec + i] = (value >> i) & 1;
+ }
+ nvic_irq_update(s);
+ break;
+ }
+ case 0xd04: /* Interrupt Control State (ICSR) */
+ if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+ if (value & (1 << 31)) {
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
+ } else if (value & (1 << 30) &&
+ arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* PENDNMICLR didn't exist in v7M */
+ armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
+ }
+ }
+ if (value & (1 << 28)) {
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
+ } else if (value & (1 << 27)) {
+ armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
+ }
+ if (value & (1 << 26)) {
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
+ } else if (value & (1 << 25)) {
+ armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
+ }
+ break;
+ case 0xd08: /* Vector Table Offset. */
+ cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
+ break;
+ case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
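+        /* Writes take effect only if the VECTKEY field holds the key 0x05FA */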
+ if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
+ if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
+ if (attrs.secure ||
+ !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
+ signal_sysresetreq(s);
+ }
+ }
+ if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Setting VECTCLRACTIVE when not in DEBUG mode "
+ "is UNPREDICTABLE\n");
+ }
+ if (value & R_V7M_AIRCR_VECTRESET_MASK) {
+ /* NB: this bit is RES0 in v8M */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Setting VECTRESET when not in DEBUG mode "
+ "is UNPREDICTABLE\n");
+ }
+ if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ s->prigroup[attrs.secure] =
+ extract32(value,
+ R_V7M_AIRCR_PRIGROUP_SHIFT,
+ R_V7M_AIRCR_PRIGROUP_LENGTH);
+ }
+ /* AIRCR.IESB is RAZ/WI because we implement only minimal RAS */
+ if (attrs.secure) {
+ /* These bits are only writable by secure */
+ cpu->env.v7m.aircr = value &
+ (R_V7M_AIRCR_SYSRESETREQS_MASK |
+ R_V7M_AIRCR_BFHFNMINS_MASK |
+ R_V7M_AIRCR_PRIS_MASK);
+ /* BFHFNMINS changes the priority of Secure HardFault, and
+ * allows a pending Non-secure HardFault to preempt (which
+ * we implement by marking it enabled).
+ */
+ if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+ s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
+ s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
+ } else {
+ s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
+ s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
+ }
+ }
+ nvic_irq_update(s);
+ }
+ break;
+ case 0xd10: /* System Control. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
+ /* We don't implement deep-sleep so these bits are RAZ/WI.
+ * The other bits in the register are banked.
+ * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
+ * is architecturally permitted.
+ */
+ value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
+ cpu->env.v7m.scr[attrs.secure] = value;
+ break;
+ case 0xd14: /* Configuration Control. */
+ {
+ uint32_t mask;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+
+ /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
+ mask = R_V7M_CCR_STKALIGN_MASK |
+ R_V7M_CCR_BFHFNMIGN_MASK |
+ R_V7M_CCR_DIV_0_TRP_MASK |
+ R_V7M_CCR_UNALIGN_TRP_MASK |
+ R_V7M_CCR_USERSETMPEND_MASK |
+ R_V7M_CCR_NONBASETHRDENA_MASK;
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) {
+ /* TRD is always RAZ/WI from NS */
+ mask |= R_V7M_CCR_TRD_MASK;
+ }
+ value &= mask;
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
+ value |= R_V7M_CCR_NONBASETHRDENA_MASK
+ | R_V7M_CCR_STKALIGN_MASK;
+ }
+ if (attrs.secure) {
+ /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
+ cpu->env.v7m.ccr[M_REG_NS] =
+ (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
+ | (value & R_V7M_CCR_BFHFNMIGN_MASK);
+ value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
+ } else {
+ /*
+ * BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0, so
+ * preserve the state currently in the NS element of the array
+ */
+ if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
+ value |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
+ }
+ }
+
+ cpu->env.v7m.ccr[attrs.secure] = value;
+ break;
+ }
+ case 0xd24: /* System Handler Control and State (SHCSR) */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
+ if (attrs.secure) {
+ s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
+ /* Secure HardFault active bit cannot be written */
+ s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
+ (value & (1 << 10)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
+ (value & (1 << 11)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
+ (value & (1 << 12)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
+ (value & (1 << 18)) != 0;
+ s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
+ /* SecureFault not banked, but RAZ/WI to NS */
+ s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
+ s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
+ s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
+ } else {
+ s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* HARDFAULTPENDED is not present in v7M */
+ s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
+ }
+ s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
+ s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
+ s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
+ s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
+ s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
+ s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
+ s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
+ s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
+ s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
+ }
+ if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
+ s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
+ s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
+ }
+ /* NMIACT can only be written if the write is of a zero, with
+ * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
+ */
+ if (!attrs.secure && cpu->env.v7m.secure &&
+ (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
+ (value & (1 << 5)) == 0) {
+ s->vectors[ARMV7M_EXCP_NMI].active = 0;
+ }
+ /* HARDFAULTACT can only be written if the write is of a zero
+ * to the non-secure HardFault state by the CPU in secure state.
+ * The only case where we can be targeting the non-secure HF state
+ * when in secure state is if this is a write via the NS alias
+ * and BFHFNMINS is 1.
+ */
+ if (!attrs.secure && cpu->env.v7m.secure &&
+ (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
+ (value & (1 << 2)) == 0) {
+ s->vectors[ARMV7M_EXCP_HARD].active = 0;
+ }
+
+ /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
+ s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
+ nvic_irq_update(s);
+ break;
+ case 0xd2c: /* Hard Fault Status. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ cpu->env.v7m.hfsr &= ~value; /* W1C */
+ break;
+ case 0xd30: /* Debug Fault Status. */
+ cpu->env.v7m.dfsr &= ~value; /* W1C */
+ break;
+ case 0xd34: /* Mem Manage Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ cpu->env.v7m.mmfar[attrs.secure] = value;
+ return;
+ case 0xd38: /* Bus Fault Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure &&
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ return;
+ }
+ cpu->env.v7m.bfar = value;
+ return;
+ case 0xd3c: /* Aux Fault Status. */
+ qemu_log_mask(LOG_UNIMP,
+ "NVIC: Aux fault status registers unimplemented\n");
+ break;
+ case 0xd84: /* CSSELR */
+ if (!arm_v7m_csselr_razwi(cpu)) {
+ cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
+ }
+ break;
+ case 0xd88: /* CPACR */
+ if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ /* We implement only the Floating Point extension's CP10/CP11 */
+ cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
+ }
+ break;
+ case 0xd8c: /* NSACR */
+ if (attrs.secure && cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ /* We implement only the Floating Point extension's CP10/CP11 */
+ cpu->env.v7m.nsacr = value & (3 << 10);
+ }
+ break;
+ case 0xd90: /* MPU_TYPE */
+ return; /* RO */
+ case 0xd94: /* MPU_CTRL */
+ if ((value &
+ (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
+ == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
+ "UNPREDICTABLE\n");
+ }
+ cpu->env.v7m.mpu_ctrl[attrs.secure]
+ = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
+ R_V7M_MPU_CTRL_HFNMIENA_MASK |
+ R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
+ tlb_flush(CPU(cpu));
+ break;
+ case 0xd98: /* MPU_RNR */
+ if (value >= cpu->pmsav7_dregion) {
+ qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
+ PRIu32 "/%" PRIu32 "\n",
+ value, cpu->pmsav7_dregion);
+ } else {
+ cpu->env.pmsav7.rnr[attrs.secure] = value;
+ }
+ break;
+ case 0xd9c: /* MPU_RBAR */
+ case 0xda4: /* MPU_RBAR_A1 */
+ case 0xdac: /* MPU_RBAR_A2 */
+ case 0xdb4: /* MPU_RBAR_A3 */
+ {
+ int region;
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* PMSAv8M handling of the aliases is different from v7M:
+ * aliases A1, A2, A3 override the low two bits of the region
+ * number in MPU_RNR, and there is no 'region' field in the
+ * RBAR register.
+ */
+ int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
+
+ region = cpu->env.pmsav7.rnr[attrs.secure];
+ if (aliasno) {
+ region = deposit32(region, 0, 2, aliasno);
+ }
+ if (region >= cpu->pmsav7_dregion) {
+ return;
+ }
+ cpu->env.pmsav8.rbar[attrs.secure][region] = value;
+ tlb_flush(CPU(cpu));
+ return;
+ }
+
+ if (value & (1 << 4)) {
+ /* VALID bit means use the region number specified in this
+ * value and also update MPU_RNR.REGION with that value.
+ */
+ region = extract32(value, 0, 4);
+ if (region >= cpu->pmsav7_dregion) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "MPU region out of range %u/%" PRIu32 "\n",
+ region, cpu->pmsav7_dregion);
+ return;
+ }
+ cpu->env.pmsav7.rnr[attrs.secure] = region;
+ } else {
+ region = cpu->env.pmsav7.rnr[attrs.secure];
+ }
+
+ if (region >= cpu->pmsav7_dregion) {
+ return;
+ }
+
+ cpu->env.pmsav7.drbar[region] = value & ~0x1f;
+ tlb_flush(CPU(cpu));
+ break;
+ }
+ case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
+ case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
+ case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
+ case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
+ {
+ int region = cpu->env.pmsav7.rnr[attrs.secure];
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* PMSAv8M handling of the aliases is different from v7M:
+ * aliases A1, A2, A3 override the low two bits of the region
+ * number in MPU_RNR.
+ */
+ int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
+
+ region = cpu->env.pmsav7.rnr[attrs.secure];
+ if (aliasno) {
+ region = deposit32(region, 0, 2, aliasno);
+ }
+ if (region >= cpu->pmsav7_dregion) {
+ return;
+ }
+ cpu->env.pmsav8.rlar[attrs.secure][region] = value;
+ tlb_flush(CPU(cpu));
+ return;
+ }
+
+ if (region >= cpu->pmsav7_dregion) {
+ return;
+ }
+
+ cpu->env.pmsav7.drsr[region] = value & 0xff3f;
+ cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
+ tlb_flush(CPU(cpu));
+ break;
+ }
+ case 0xdc0: /* MPU_MAIR0 */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (cpu->pmsav7_dregion) {
+ /* Register is RES0 if no MPU regions are implemented */
+ cpu->env.pmsav8.mair0[attrs.secure] = value;
+ }
+ /* We don't need to do anything else because memory attributes
+ * only affect cacheability, and we don't implement caching.
+ */
+ break;
+ case 0xdc4: /* MPU_MAIR1 */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (cpu->pmsav7_dregion) {
+ /* Register is RES0 if no MPU regions are implemented */
+ cpu->env.pmsav8.mair1[attrs.secure] = value;
+ }
+ /* We don't need to do anything else because memory attributes
+ * only affect cacheability, and we don't implement caching.
+ */
+ break;
+ case 0xdd0: /* SAU_CTRL */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return;
+ }
+ cpu->env.sau.ctrl = value & 3;
+ break;
+ case 0xdd4: /* SAU_TYPE */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ break;
+ case 0xdd8: /* SAU_RNR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return;
+ }
+ if (value >= cpu->sau_sregion) {
+ qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
+ PRIu32 "/%" PRIu32 "\n",
+ value, cpu->sau_sregion);
+ } else {
+ cpu->env.sau.rnr = value;
+ }
+ break;
+ case 0xddc: /* SAU_RBAR */
+ {
+ int region = cpu->env.sau.rnr;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return;
+ }
+ if (region >= cpu->sau_sregion) {
+ return;
+ }
+ cpu->env.sau.rbar[region] = value & ~0x1f;
+ tlb_flush(CPU(cpu));
+ break;
+ }
+ case 0xde0: /* SAU_RLAR */
+ {
+ int region = cpu->env.sau.rnr;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return;
+ }
+ if (region >= cpu->sau_sregion) {
+ return;
+ }
+ cpu->env.sau.rlar[region] = value & ~0x1c;
+ tlb_flush(CPU(cpu));
+ break;
+ }
+ case 0xde4: /* SFSR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return;
+ }
+ cpu->env.v7m.sfsr &= ~value; /* W1C */
+ break;
+ case 0xde8: /* SFAR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ if (!attrs.secure) {
+ return;
+ }
+        cpu->env.v7m.sfar = value;
+ break;
+ case 0xf00: /* Software Triggered Interrupt Register */
+ {
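+        /*
+         * The written value selects an external interrupt number; adding
+         * NVIC_FIRST_IRQ converts it to an exception number, since
+         * exceptions 0..15 are the internal (system) exceptions.
+         */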
+ int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+
+ if (excnum < s->num_irq) {
+ armv7m_nvic_set_pending(s, excnum, false);
+ }
+ break;
+ }
+ case 0xf04: /* RFSR */
+ if (!cpu_isar_feature(aa32_ras, cpu)) {
+ goto bad_offset;
+ }
+ /* We provide minimal-RAS only: RFSR is RAZ/WI */
+ break;
+ case 0xf34: /* FPCCR */
+ if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ /* Not all bits here are banked. */
+ uint32_t fpccr_s;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ /* Don't allow setting of bits not present in v7M */
+ value &= (R_V7M_FPCCR_LSPACT_MASK |
+ R_V7M_FPCCR_USER_MASK |
+ R_V7M_FPCCR_THREAD_MASK |
+ R_V7M_FPCCR_HFRDY_MASK |
+ R_V7M_FPCCR_MMRDY_MASK |
+ R_V7M_FPCCR_BFRDY_MASK |
+ R_V7M_FPCCR_MONRDY_MASK |
+ R_V7M_FPCCR_LSPEN_MASK |
+ R_V7M_FPCCR_ASPEN_MASK);
+ }
+ value &= ~R_V7M_FPCCR_RES0_MASK;
+
+ if (!attrs.secure) {
+ /* Some non-banked bits are configurably writable by NS */
+ fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
+ if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
+ uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
+ fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
+ }
+ if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
+ uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
+ fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
+ }
+ if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
+ uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
+ fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
+ fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
+ }
+ /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */
+ {
+ uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
+ fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
+ }
+
+ /*
+ * All other non-banked bits are RAZ/WI from NS; write
+ * just the banked bits to fpccr[M_REG_NS].
+ */
+ value &= R_V7M_FPCCR_BANKED_MASK;
+ cpu->env.v7m.fpccr[M_REG_NS] = value;
+ } else {
+ fpccr_s = value;
+ }
+ cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
+ }
+ break;
+ case 0xf38: /* FPCAR */
+ if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ value &= ~7;
+ cpu->env.v7m.fpcar[attrs.secure] = value;
+ }
+ break;
+ case 0xf3c: /* FPDSCR */
+ if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;
+ if (cpu_isar_feature(any_fp16, cpu)) {
+ mask |= FPCR_FZ16;
+ }
+ value &= mask;
+ if (cpu_isar_feature(aa32_lob, cpu)) {
+ value |= 4 << FPCR_LTPSIZE_SHIFT;
+ }
+ cpu->env.v7m.fpdscr[attrs.secure] = value;
+ }
+ break;
+ case 0xf50: /* ICIALLU */
+ case 0xf58: /* ICIMVAU */
+ case 0xf5c: /* DCIMVAC */
+ case 0xf60: /* DCISW */
+ case 0xf64: /* DCCMVAU */
+ case 0xf68: /* DCCMVAC */
+ case 0xf6c: /* DCCSW */
+ case 0xf70: /* DCCIMVAC */
+ case 0xf74: /* DCCISW */
+ case 0xf78: /* BPIALL */
+ /* Cache and branch predictor maintenance: for QEMU these always NOP */
+ break;
+ default:
+ bad_offset:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "NVIC: Bad write offset 0x%x\n", offset);
+ }
+}
+
+static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
+{
+ /* Return true if unprivileged access to this register is permitted. */
+ switch (offset) {
+ case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
+ /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
+ * controls access even though the CPU is in Secure state (I_QDKX).
+ */
+ return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
+ default:
+ /* All other user accesses cause a BusFault unconditionally */
+ return false;
+ }
+}
+
+static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
+{
+ /* Behaviour for the SHPR register field for this exception:
+ * return M_REG_NS to use the nonsecure vector (including for
+ * non-banked exceptions), M_REG_S for the secure version of
+ * a banked exception, and -1 if this field should RAZ/WI.
+ */
+ switch (exc) {
+ case ARMV7M_EXCP_MEM:
+ case ARMV7M_EXCP_USAGE:
+ case ARMV7M_EXCP_SVC:
+ case ARMV7M_EXCP_PENDSV:
+ case ARMV7M_EXCP_SYSTICK:
+ /* Banked exceptions */
+ return attrs.secure;
+ case ARMV7M_EXCP_BUS:
+ /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
+ if (!attrs.secure &&
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ return -1;
+ }
+ return M_REG_NS;
+ case ARMV7M_EXCP_SECURE:
+ /* Not banked, RAZ/WI from nonsecure */
+ if (!attrs.secure) {
+ return -1;
+ }
+ return M_REG_NS;
+ case ARMV7M_EXCP_DEBUG:
+ /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
+ return M_REG_NS;
+ case 8 ... 10:
+ case 13:
+ /* RES0 */
+ return -1;
+ default:
+ /* Not reachable due to decode of SHPR register addresses */
+ g_assert_not_reached();
+ }
+}
+
+static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ NVICState *s = (NVICState *)opaque;
+ uint32_t offset = addr;
+ unsigned i, startvec, end;
+ uint32_t val;
+
+ if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
+ /* Generate BusFault for unprivileged accesses */
+ return MEMTX_ERROR;
+ }
+
+ switch (offset) {
+ /* reads of set and clear both return the status */
+ case 0x100 ... 0x13f: /* NVIC Set enable */
+ offset += 0x80;
+ /* fall through */
+ case 0x180 ... 0x1bf: /* NVIC Clear enable */
+ val = 0;
+ startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */
+
+ for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+ if (s->vectors[startvec + i].enabled &&
+ (attrs.secure || s->itns[startvec + i])) {
+ val |= (1 << i);
+ }
+ }
+ break;
+ case 0x200 ... 0x23f: /* NVIC Set pend */
+ offset += 0x80;
+ /* fall through */
+ case 0x280 ... 0x2bf: /* NVIC Clear pend */
+ val = 0;
+ startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
+ for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+ if (s->vectors[startvec + i].pending &&
+ (attrs.secure || s->itns[startvec + i])) {
+ val |= (1 << i);
+ }
+ }
+ break;
+ case 0x300 ... 0x33f: /* NVIC Active */
+ val = 0;
+
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
+ break;
+ }
+
+ startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
+
+ for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+ if (s->vectors[startvec + i].active &&
+ (attrs.secure || s->itns[startvec + i])) {
+ val |= (1 << i);
+ }
+ }
+ break;
+ case 0x400 ... 0x5ef: /* NVIC Priority */
+ val = 0;
+ startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
+
+ for (i = 0; i < size && startvec + i < s->num_irq; i++) {
+ if (attrs.secure || s->itns[startvec + i]) {
+ val |= s->vectors[startvec + i].prio << (8 * i);
+ }
+ }
+ break;
+ case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ val = 0;
+ break;
+ }
+ /* fall through */
+ case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
+ val = 0;
+ for (i = 0; i < size; i++) {
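+            /*
+             * SHPR1 starts at offset 0xd18, so (offset - 0xd14) + i is the
+             * exception number for this byte: SHPR1 covers exceptions 4..7,
+             * SHPR2 covers 8..11 and SHPR3 covers 12..15.
+             */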
+ unsigned hdlidx = (offset - 0xd14) + i;
+ int sbank = shpr_bank(s, hdlidx, attrs);
+
+ if (sbank < 0) {
+ continue;
+ }
+ val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
+ }
+ break;
+ case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ val = 0;
+ break;
+        }
+ /*
+ * The BFSR bits [15:8] are shared between security states
+ * and we store them in the NS copy. They are RAZ/WI for
+ * NS code if AIRCR.BFHFNMINS is 0.
+ */
+ val = s->cpu->env.v7m.cfsr[attrs.secure];
+ if (!attrs.secure &&
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ val &= ~R_V7M_CFSR_BFSR_MASK;
+ } else {
+ val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
+ }
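+        /* Return only the bytes covered by this access's offset and size */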
+ val = extract32(val, (offset - 0xd28) * 8, size * 8);
+ break;
+ case 0xfe0 ... 0xfff: /* ID. */
+ if (offset & 3) {
+ val = 0;
+ } else {
+ val = nvic_id[(offset - 0xfe0) >> 2];
+ }
+ break;
+ default:
+ if (size == 4) {
+ val = nvic_readl(s, offset, attrs);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "NVIC: Bad read of size %d at offset 0x%x\n",
+ size, offset);
+ val = 0;
+ }
+ }
+
+ trace_nvic_sysreg_read(addr, val, size);
+ *data = val;
+ return MEMTX_OK;
+}
+
+static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ NVICState *s = (NVICState *)opaque;
+ uint32_t offset = addr;
+ unsigned i, startvec, end;
+ unsigned setval = 0;
+
+ trace_nvic_sysreg_write(addr, value, size);
+
+ if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
+ /* Generate BusFault for unprivileged accesses */
+ return MEMTX_ERROR;
+ }
+
+ switch (offset) {
+ case 0x100 ... 0x13f: /* NVIC Set enable */
+ offset += 0x80;
+ setval = 1;
+ /* fall through */
+ case 0x180 ... 0x1bf: /* NVIC Clear enable */
+ startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
+
+ for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+ if (value & (1 << i) &&
+ (attrs.secure || s->itns[startvec + i])) {
+ s->vectors[startvec + i].enabled = setval;
+ }
+ }
+ nvic_irq_update(s);
+ goto exit_ok;
+ case 0x200 ... 0x23f: /* NVIC Set pend */
+ /* the special logic in armv7m_nvic_set_pending()
+ * is not needed since IRQs are never escalated
+ */
+ offset += 0x80;
+ setval = 1;
+ /* fall through */
+ case 0x280 ... 0x2bf: /* NVIC Clear pend */
+ startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
+
+ for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+ if (value & (1 << i) &&
+ (attrs.secure || s->itns[startvec + i])) {
+ s->vectors[startvec + i].pending = setval;
+ }
+ }
+ nvic_irq_update(s);
+ goto exit_ok;
+ case 0x300 ... 0x33f: /* NVIC Active */
+ goto exit_ok; /* R/O */
+ case 0x400 ... 0x5ef: /* NVIC Priority */
+ startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
+
+ for (i = 0; i < size && startvec + i < s->num_irq; i++) {
+ if (attrs.secure || s->itns[startvec + i]) {
+ set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
+ }
+ }
+ nvic_irq_update(s);
+ goto exit_ok;
+ case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto exit_ok;
+ }
+ /* fall through */
+ case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
+ for (i = 0; i < size; i++) {
+ unsigned hdlidx = (offset - 0xd14) + i;
+ int newprio = extract32(value, i * 8, 8);
+ int sbank = shpr_bank(s, hdlidx, attrs);
+
+ if (sbank < 0) {
+ continue;
+ }
+ set_prio(s, hdlidx, sbank, newprio);
+ }
+ nvic_irq_update(s);
+ goto exit_ok;
+ case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto exit_ok;
+ }
+ /* All bits are W1C, so construct 32 bit value with 0s in
+ * the parts not written by the access size
+ */
+ value <<= ((offset - 0xd28) * 8);
+
+ if (!attrs.secure &&
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ /* BFSR bits are RAZ/WI for NS if BFHFNMINS is set */
+ value &= ~R_V7M_CFSR_BFSR_MASK;
+ }
+
+ s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
+ if (attrs.secure) {
+ /* The BFSR bits [15:8] are shared between security states
+ * and we store them in the NS copy.
+ */
+ s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
+ }
+ goto exit_ok;
+ }
+ if (size == 4) {
+ nvic_writel(s, offset, value, attrs);
+ goto exit_ok;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
+ /* This is UNPREDICTABLE; treat as RAZ/WI */
+
+ exit_ok:
+ /* Ensure any changes made are reflected in the cached hflags. */
+ arm_rebuild_hflags(&s->cpu->env);
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps nvic_sysreg_ops = {
+ .read_with_attrs = nvic_sysreg_read,
+ .write_with_attrs = nvic_sysreg_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int nvic_post_load(void *opaque, int version_id)
+{
+ NVICState *s = opaque;
+ unsigned i;
+ int resetprio;
+
+ /* Check for out of range priority settings */
+ resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
+
+ if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
+ s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
+ s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
+ return 1;
+ }
+ for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
+ if (s->vectors[i].prio & ~0xff) {
+ return 1;
+ }
+ }
+
+ nvic_recompute_state(s);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_VecInfo = {
+ .name = "armv7m_nvic_info",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT16(prio, VecInfo),
+ VMSTATE_UINT8(enabled, VecInfo),
+ VMSTATE_UINT8(pending, VecInfo),
+ VMSTATE_UINT8(active, VecInfo),
+ VMSTATE_UINT8(level, VecInfo),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool nvic_security_needed(void *opaque)
+{
+ NVICState *s = opaque;
+
+ return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
+}
+
+static int nvic_security_post_load(void *opaque, int version_id)
+{
+ NVICState *s = opaque;
+ int i;
+
+ /* Check for out of range priority settings */
+ if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
+ && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
+ /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
+ * if the CPU state has been migrated yet; a mismatch won't
+ * cause the emulation to blow up, though.
+ */
+ return 1;
+ }
+ for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
+ if (s->sec_vectors[i].prio & ~0xff) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static const VMStateDescription vmstate_nvic_security = {
+ .name = "armv7m_nvic/m-security",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = nvic_security_needed,
+ .post_load = &nvic_security_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
+ vmstate_VecInfo, VecInfo),
+ VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
+ VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_nvic = {
+ .name = "armv7m_nvic",
+ .version_id = 4,
+ .minimum_version_id = 4,
+ .post_load = &nvic_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
+ vmstate_VecInfo, VecInfo),
+ VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_nvic_security,
+ NULL
+ }
+};
+
+static Property props_nvic[] = {
+ /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
+ DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void armv7m_nvic_reset(DeviceState *dev)
+{
+ int resetprio;
+ NVICState *s = NVIC(dev);
+
+ memset(s->vectors, 0, sizeof(s->vectors));
+ memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
+ s->prigroup[M_REG_NS] = 0;
+ s->prigroup[M_REG_S] = 0;
+
+ s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
+ /* MEM, BUS, and USAGE are enabled through
+ * the System Handler Control register
+ */
+ s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
+ s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
+ s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
+
+ /* DebugMonitor is enabled via DEMCR.MON_EN */
+ s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0;
+
+ resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
+ s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
+ s->vectors[ARMV7M_EXCP_NMI].prio = -2;
+ s->vectors[ARMV7M_EXCP_HARD].prio = -1;
+
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
+ s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
+ s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
+ s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
+ s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
+
+ /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
+ s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
+ /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
+ s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
+ } else {
+ s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
+ }
+
+ /* Strictly speaking the reset handler should be enabled.
+ * However, we don't simulate soft resets through the NVIC,
+ * and the reset vector should never be pended.
+ * So we leave it disabled to catch logic errors.
+ */
+
+ s->exception_prio = NVIC_NOEXC_PRIO;
+ s->vectpending = 0;
+ s->vectpending_is_s_banked = false;
+ s->vectpending_prio = NVIC_NOEXC_PRIO;
+
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
+ memset(s->itns, 0, sizeof(s->itns));
+ } else {
+ /* This state is constant and not guest accessible in a non-security
+ * NVIC; we set the bits to true to avoid having to do a feature
+ * bit check in the NVIC enable/pend/etc register accessors.
+ */
+ int i;
+
+ for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
+ s->itns[i] = true;
+ }
+ }
+
+ /*
+ * We updated state that affects the CPU's MMUidx and thus its hflags;
+ * and we can't guarantee that we run before the CPU reset function.
+ */
+ arm_rebuild_hflags(&s->cpu->env);
+}
+
+static void nvic_systick_trigger(void *opaque, int n, int level)
+{
+ NVICState *s = opaque;
+
+ if (level) {
+ /* SysTick just asked us to pend its exception.
+ * (This is different from an external interrupt line's
+ * behaviour.)
+ * n == 0 : NonSecure systick
+ * n == 1 : Secure systick
+ */
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
+ }
+}
+
+static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
+{
+ NVICState *s = NVIC(dev);
+
+ /* The armv7m container object will have set our CPU pointer */
+ if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
+ error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
+ return;
+ }
+
+ if (s->num_irq > NVIC_MAX_IRQ) {
+ error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
+ return;
+ }
+
+ qdev_init_gpio_in(dev, set_irq_level, s->num_irq);
+
+ /* include space for internal exception vectors */
+ s->num_irq += NVIC_FIRST_IRQ;
+
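+    /*
+     * v6-M baseline CPUs implement only 2 priority bits (4 levels);
+     * for v7-M and later we model the full 8 bits.
+     */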
+ s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;
+
+ /*
+ * This device provides a single memory region which covers the
+ * sysreg/NVIC registers from 0xE000E000 .. 0xE000EFFF, with the
+ * exception of the systick timer registers 0xE000E010 .. 0xE000E0FF.
+ */
+ memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
+ "nvic_sysregs", 0x1000);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sysregmem);
+}
+
+static void armv7m_nvic_instance_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ NVICState *nvic = NVIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ sysbus_init_irq(sbd, &nvic->excpout);
+ qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
+ qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
+ M_REG_NUM_BANKS);
+ qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
+}
+
+static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_nvic;
+ device_class_set_props(dc, props_nvic);
+ dc->reset = armv7m_nvic_reset;
+ dc->realize = armv7m_nvic_realize;
+}
+
+static const TypeInfo armv7m_nvic_info = {
+ .name = TYPE_NVIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = armv7m_nvic_instance_init,
+ .instance_size = sizeof(NVICState),
+ .class_init = armv7m_nvic_class_init,
+ .class_size = sizeof(SysBusDeviceClass),
+};
+
+static void armv7m_nvic_register_types(void)
+{
+ type_register_static(&armv7m_nvic_info);
+}
+
+type_init(armv7m_nvic_register_types)
diff --git a/hw/intc/aspeed_vic.c b/hw/intc/aspeed_vic.c
new file mode 100644
index 000000000..5ba06c526
--- /dev/null
+++ b/hw/intc/aspeed_vic.c
@@ -0,0 +1,363 @@
+/*
+ * ASPEED Interrupt Controller (New)
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ *
+ * Copyright 2015, 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+/* The hardware exposes two register sets, a legacy set and a 'new' set. The
+ * model implements the 'new' register set, and logs warnings on accesses to
+ * the legacy IO space.
+ *
+ * The hardware uses 32bit registers to manage 51 IRQs, with low and high
+ * registers for each conceptual register. The device model's implementation
+ * uses 64bit data types to store both low and high register values (in the one
+ * member), but must cope with access offset values in multiples of 4 passed to
+ * the callbacks. As such the read() and write() implementations process the
+ * provided offset to understand whether the access is requesting the lower or
+ * upper 32 bits of the 64bit member.
+ *
+ * Additionally, the "Interrupt Enable", "Edge Status" and "Software Interrupt"
+ * fields have separate "enable"/"status" and "clear" registers, where set bits
+ * are written to one or the other to change state (avoiding a
+ * read-modify-write sequence).
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/aspeed_vic.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+
+#define AVIC_NEW_BASE_OFFSET 0x80
+
+#define AVIC_L_MASK 0xFFFFFFFFU
+#define AVIC_H_MASK 0x0007FFFFU
+#define AVIC_EVENT_W_MASK (0x78000ULL << 32)
+
+static void aspeed_vic_update(AspeedVICState *s)
+{
+ uint64_t new = (s->raw & s->enable);
+ uint64_t flags;
+
+ flags = new & s->select;
+ trace_aspeed_vic_update_fiq(!!flags);
+ qemu_set_irq(s->fiq, !!flags);
+
+ flags = new & ~s->select;
+ trace_aspeed_vic_update_irq(!!flags);
+ qemu_set_irq(s->irq, !!flags);
+}
+
+static void aspeed_vic_set_irq(void *opaque, int irq, int level)
+{
+ uint64_t irq_mask;
+ bool raise;
+ AspeedVICState *s = (AspeedVICState *)opaque;
+
+    if (irq >= ASPEED_VIC_NR_IRQS) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n",
+ __func__, irq);
+ return;
+ }
+
+ trace_aspeed_vic_set_irq(irq, level);
+
+ irq_mask = BIT(irq);
+ if (s->sense & irq_mask) {
+ /* level-triggered */
+ if (s->event & irq_mask) {
+ /* high-sensitive */
+ raise = level;
+ } else {
+ /* low-sensitive */
+ raise = !level;
+ }
+ s->raw = deposit64(s->raw, irq, 1, raise);
+ } else {
+ uint64_t old_level = s->level & irq_mask;
+
+ /* edge-triggered */
+ if (s->dual_edge & irq_mask) {
+ raise = (!!old_level) != (!!level);
+ } else {
+ if (s->event & irq_mask) {
+ /* rising-sensitive */
+ raise = !old_level && level;
+ } else {
+ /* falling-sensitive */
+ raise = old_level && !level;
+ }
+ }
+ if (raise) {
+ s->raw = deposit64(s->raw, irq, 1, raise);
+ }
+ }
+ s->level = deposit64(s->level, irq, 1, level);
+ aspeed_vic_update(s);
+}
+
+static uint64_t aspeed_vic_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AspeedVICState *s = (AspeedVICState *)opaque;
+ hwaddr n_offset;
+ uint64_t val;
+ bool high;
+
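+    /*
+     * For the 'new' register set at 0x80 and above, bit 2 of the offset
+     * selects the upper or lower 32 bits of the 64-bit register member;
+     * the remaining offset bits identify the register itself.
+     */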
+ if (offset < AVIC_NEW_BASE_OFFSET) {
+ high = false;
+ n_offset = offset;
+ } else {
+ high = !!(offset & 0x4);
+ n_offset = (offset & ~0x4);
+ }
+
+ switch (n_offset) {
+ case 0x80: /* IRQ Status */
+ case 0x00:
+ val = s->raw & ~s->select & s->enable;
+ break;
+ case 0x88: /* FIQ Status */
+ case 0x04:
+ val = s->raw & s->select & s->enable;
+ break;
+ case 0x90: /* Raw Interrupt Status */
+ case 0x08:
+ val = s->raw;
+ break;
+ case 0x98: /* Interrupt Selection */
+ case 0x0c:
+ val = s->select;
+ break;
+ case 0xa0: /* Interrupt Enable */
+ case 0x10:
+ val = s->enable;
+ break;
+ case 0xb0: /* Software Interrupt */
+ case 0x18:
+ val = s->trigger;
+ break;
+ case 0xc0: /* Interrupt Sensitivity */
+ case 0x24:
+ val = s->sense;
+ break;
+ case 0xc8: /* Interrupt Both Edge Trigger Control */
+ case 0x28:
+ val = s->dual_edge;
+ break;
+ case 0xd0: /* Interrupt Event */
+ case 0x2c:
+ val = s->event;
+ break;
+ case 0xe0: /* Edge Triggered Interrupt Status */
+ val = s->raw & ~s->sense;
+ break;
+ /* Illegal */
+ case 0xa8: /* Interrupt Enable Clear */
+ case 0xb8: /* Software Interrupt Clear */
+ case 0xd8: /* Edge Triggered Interrupt Clear */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Read of write-only register with offset 0x%"
+ HWADDR_PRIx "\n", __func__, offset);
+ val = 0;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad register at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ val = 0;
+ break;
+ }
+ if (high) {
+ val = extract64(val, 32, 19);
+ } else {
+ val = extract64(val, 0, 32);
+ }
+ trace_aspeed_vic_read(offset, size, val);
+ return val;
+}
+
+static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size)
+{
+ AspeedVICState *s = (AspeedVICState *)opaque;
+ hwaddr n_offset;
+ bool high;
+
+ if (offset < AVIC_NEW_BASE_OFFSET) {
+ high = false;
+ n_offset = offset;
+ } else {
+ high = !!(offset & 0x4);
+ n_offset = (offset & ~0x4);
+ }
+
+ trace_aspeed_vic_write(offset, size, data);
+
+ /* Given we have members using separate enable/clear registers, deposit64()
+ * isn't quite the tool for the job. Instead, relocate the incoming bits to
+ * the required bit offset based on the provided access address
+ */
+ if (high) {
+ data &= AVIC_H_MASK;
+ data <<= 32;
+ } else {
+ data &= AVIC_L_MASK;
+ }
+
+ switch (n_offset) {
+ case 0x98: /* Interrupt Selection */
+ case 0x0c:
+ /* Register has deposit64() semantics - overwrite requested 32 bits */
+ if (high) {
+ s->select &= AVIC_L_MASK;
+ } else {
+ s->select &= ((uint64_t) AVIC_H_MASK) << 32;
+ }
+ s->select |= data;
+ break;
+ case 0xa0: /* Interrupt Enable */
+ case 0x10:
+ s->enable |= data;
+ break;
+ case 0xa8: /* Interrupt Enable Clear */
+ case 0x14:
+ s->enable &= ~data;
+ break;
+ case 0xb0: /* Software Interrupt */
+ case 0x18:
+ qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. "
+ "IRQs requested: 0x%016" PRIx64 "\n", __func__, data);
+ break;
+ case 0xb8: /* Software Interrupt Clear */
+ case 0x1c:
+ qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. "
+ "IRQs to be cleared: 0x%016" PRIx64 "\n", __func__, data);
+ break;
+ case 0xd0: /* Interrupt Event */
+ /* Register has deposit64() semantics - overwrite the top four valid
+ * IRQ bits, as only the top four IRQs (GPIOs) can change their event
+ * type */
+ if (high) {
+ s->event &= ~AVIC_EVENT_W_MASK;
+ s->event |= (data & AVIC_EVENT_W_MASK);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+                          "Ignoring invalid write to interrupt event register\n");
+ }
+ break;
+ case 0xd8: /* Edge Triggered Interrupt Clear */
+ case 0x38:
+ s->raw &= ~(data & ~s->sense);
+ break;
+ case 0x80: /* IRQ Status */
+ case 0x00:
+ case 0x88: /* FIQ Status */
+ case 0x04:
+ case 0x90: /* Raw Interrupt Status */
+ case 0x08:
+ case 0xc0: /* Interrupt Sensitivity */
+ case 0x24:
+ case 0xc8: /* Interrupt Both Edge Trigger Control */
+ case 0x28:
+ case 0xe0: /* Edge Triggered Interrupt Status */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Write of read-only register with offset 0x%"
+ HWADDR_PRIx "\n", __func__, offset);
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad register at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ break;
+ }
+ aspeed_vic_update(s);
+}
+
+static const MemoryRegionOps aspeed_vic_ops = {
+ .read = aspeed_vic_read,
+ .write = aspeed_vic_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .valid.unaligned = false,
+};
+
+static void aspeed_vic_reset(DeviceState *dev)
+{
+ AspeedVICState *s = ASPEED_VIC(dev);
+
+ s->level = 0;
+ s->raw = 0;
+ s->select = 0;
+ s->enable = 0;
+ s->trigger = 0;
+ s->sense = 0x1F07FFF8FFFFULL;
+ s->dual_edge = 0xF800070000ULL;
+ s->event = 0x5F07FFF8FFFFULL;
+}
+
+#define AVIC_IO_REGION_SIZE 0x20000
+
+static void aspeed_vic_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ AspeedVICState *s = ASPEED_VIC(dev);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_vic_ops, s,
+ TYPE_ASPEED_VIC, AVIC_IO_REGION_SIZE);
+
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ qdev_init_gpio_in(dev, aspeed_vic_set_irq, ASPEED_VIC_NR_IRQS);
+ sysbus_init_irq(sbd, &s->irq);
+ sysbus_init_irq(sbd, &s->fiq);
+}
+
+static const VMStateDescription vmstate_aspeed_vic = {
+ .name = "aspeed.new-vic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(level, AspeedVICState),
+ VMSTATE_UINT64(raw, AspeedVICState),
+ VMSTATE_UINT64(select, AspeedVICState),
+ VMSTATE_UINT64(enable, AspeedVICState),
+ VMSTATE_UINT64(trigger, AspeedVICState),
+ VMSTATE_UINT64(sense, AspeedVICState),
+ VMSTATE_UINT64(dual_edge, AspeedVICState),
+ VMSTATE_UINT64(event, AspeedVICState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void aspeed_vic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->realize = aspeed_vic_realize;
+ dc->reset = aspeed_vic_reset;
+ dc->desc = "ASPEED Interrupt Controller (New)";
+ dc->vmsd = &vmstate_aspeed_vic;
+}
+
+static const TypeInfo aspeed_vic_info = {
+ .name = TYPE_ASPEED_VIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AspeedVICState),
+ .class_init = aspeed_vic_class_init,
+};
+
+static void aspeed_vic_register_types(void)
+{
+ type_register_static(&aspeed_vic_info);
+}
+
+type_init(aspeed_vic_register_types);
diff --git a/hw/intc/bcm2835_ic.c b/hw/intc/bcm2835_ic.c
new file mode 100644
index 000000000..9000d995e
--- /dev/null
+++ b/hw/intc/bcm2835_ic.c
@@ -0,0 +1,243 @@
+/*
+ * Raspberry Pi emulation (c) 2012 Gregory Estrade
+ * Refactoring for Pi2 Copyright (c) 2015, Microsoft. Written by Andrew Baumann.
+ * Heavily based on pl190.c, copyright terms below:
+ *
+ * Arm PrimeCell PL190 Vector Interrupt Controller
+ *
+ * Copyright (c) 2006 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/bcm2835_ic.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+
+#define GPU_IRQS 64
+#define ARM_IRQS 8
+
+#define IRQ_PENDING_BASIC 0x00 /* IRQ basic pending */
+#define IRQ_PENDING_1 0x04 /* IRQ pending 1 */
+#define IRQ_PENDING_2 0x08 /* IRQ pending 2 */
+#define FIQ_CONTROL 0x0C /* FIQ register */
+#define IRQ_ENABLE_1 0x10 /* Interrupt enable register 1 */
+#define IRQ_ENABLE_2 0x14 /* Interrupt enable register 2 */
+#define IRQ_ENABLE_BASIC 0x18 /* Base interrupt enable register */
+#define IRQ_DISABLE_1 0x1C /* Interrupt disable register 1 */
+#define IRQ_DISABLE_2 0x20 /* Interrupt disable register 2 */
+#define IRQ_DISABLE_BASIC 0x24 /* Base interrupt disable register */
+
+/* Update interrupts. */
+static void bcm2835_ic_update(BCM2835ICState *s)
+{
+ bool set = false;
+
+ if (s->fiq_enable) {
+ if (s->fiq_select >= GPU_IRQS) {
+ /* ARM IRQ */
+ set = extract32(s->arm_irq_level, s->fiq_select - GPU_IRQS, 1);
+ } else {
+ set = extract64(s->gpu_irq_level, s->fiq_select, 1);
+ }
+ }
+ qemu_set_irq(s->fiq, set);
+
+ set = (s->gpu_irq_level & s->gpu_irq_enable)
+ || (s->arm_irq_level & s->arm_irq_enable);
+ qemu_set_irq(s->irq, set);
+}
+
+static void bcm2835_ic_set_gpu_irq(void *opaque, int irq, int level)
+{
+ BCM2835ICState *s = opaque;
+
+ assert(irq >= 0 && irq < 64);
+ trace_bcm2835_ic_set_gpu_irq(irq, level);
+ s->gpu_irq_level = deposit64(s->gpu_irq_level, irq, 1, level != 0);
+ bcm2835_ic_update(s);
+}
+
+static void bcm2835_ic_set_arm_irq(void *opaque, int irq, int level)
+{
+ BCM2835ICState *s = opaque;
+
+ assert(irq >= 0 && irq < 8);
+ trace_bcm2835_ic_set_cpu_irq(irq, level);
+ s->arm_irq_level = deposit32(s->arm_irq_level, irq, 1, level != 0);
+ bcm2835_ic_update(s);
+}
+
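+/* GPU IRQs also reflected in bits 10..20 of the basic pending register */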
+static const int irq_dups[] = { 7, 9, 10, 18, 19, 53, 54, 55, 56, 57, 62 };
+
+static uint64_t bcm2835_ic_read(void *opaque, hwaddr offset, unsigned size)
+{
+ BCM2835ICState *s = opaque;
+ uint32_t res = 0;
+ uint64_t gpu_pending = s->gpu_irq_level & s->gpu_irq_enable;
+ int i;
+
+ switch (offset) {
+ case IRQ_PENDING_BASIC:
+ /* bits 0-7: ARM irqs */
+ res = s->arm_irq_level & s->arm_irq_enable;
+
+ /* bits 8 & 9: pending registers 1 & 2 */
+ res |= (((uint32_t)gpu_pending) != 0) << 8;
+ res |= ((gpu_pending >> 32) != 0) << 9;
+
+ /* bits 10-20: selected GPU IRQs */
+ for (i = 0; i < ARRAY_SIZE(irq_dups); i++) {
+ res |= extract64(gpu_pending, irq_dups[i], 1) << (i + 10);
+ }
+ break;
+ case IRQ_PENDING_1:
+ res = gpu_pending;
+ break;
+ case IRQ_PENDING_2:
+ res = gpu_pending >> 32;
+ break;
+ case FIQ_CONTROL:
+ res = (s->fiq_enable << 7) | s->fiq_select;
+ break;
+ case IRQ_ENABLE_1:
+ res = s->gpu_irq_enable;
+ break;
+ case IRQ_ENABLE_2:
+ res = s->gpu_irq_enable >> 32;
+ break;
+ case IRQ_ENABLE_BASIC:
+ res = s->arm_irq_enable;
+ break;
+ case IRQ_DISABLE_1:
+ res = ~s->gpu_irq_enable;
+ break;
+ case IRQ_DISABLE_2:
+ res = ~s->gpu_irq_enable >> 32;
+ break;
+ case IRQ_DISABLE_BASIC:
+ res = ~s->arm_irq_enable;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n",
+ __func__, offset);
+ return 0;
+ }
+
+ return res;
+}
+
+static void bcm2835_ic_write(void *opaque, hwaddr offset, uint64_t val,
+ unsigned size)
+{
+ BCM2835ICState *s = opaque;
+
+ switch (offset) {
+ case FIQ_CONTROL:
+ s->fiq_select = extract32(val, 0, 7);
+ s->fiq_enable = extract32(val, 7, 1);
+ break;
+ case IRQ_ENABLE_1:
+ s->gpu_irq_enable |= val;
+ break;
+ case IRQ_ENABLE_2:
+ s->gpu_irq_enable |= val << 32;
+ break;
+ case IRQ_ENABLE_BASIC:
+ s->arm_irq_enable |= val & 0xff;
+ break;
+ case IRQ_DISABLE_1:
+ s->gpu_irq_enable &= ~val;
+ break;
+ case IRQ_DISABLE_2:
+ s->gpu_irq_enable &= ~(val << 32);
+ break;
+ case IRQ_DISABLE_BASIC:
+ s->arm_irq_enable &= ~val & 0xff;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n",
+ __func__, offset);
+ return;
+ }
+ bcm2835_ic_update(s);
+}
+
+static const MemoryRegionOps bcm2835_ic_ops = {
+ .read = bcm2835_ic_read,
+ .write = bcm2835_ic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static void bcm2835_ic_reset(DeviceState *d)
+{
+ BCM2835ICState *s = BCM2835_IC(d);
+
+ s->gpu_irq_enable = 0;
+ s->arm_irq_enable = 0;
+ s->fiq_enable = false;
+ s->fiq_select = 0;
+}
+
+static void bcm2835_ic_init(Object *obj)
+{
+ BCM2835ICState *s = BCM2835_IC(obj);
+
+ memory_region_init_io(&s->iomem, obj, &bcm2835_ic_ops, s, TYPE_BCM2835_IC,
+ 0x200);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+
+ qdev_init_gpio_in_named(DEVICE(s), bcm2835_ic_set_gpu_irq,
+ BCM2835_IC_GPU_IRQ, GPU_IRQS);
+ qdev_init_gpio_in_named(DEVICE(s), bcm2835_ic_set_arm_irq,
+ BCM2835_IC_ARM_IRQ, ARM_IRQS);
+
+ sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq);
+ sysbus_init_irq(SYS_BUS_DEVICE(s), &s->fiq);
+}
+
+static const VMStateDescription vmstate_bcm2835_ic = {
+ .name = TYPE_BCM2835_IC,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(gpu_irq_level, BCM2835ICState),
+ VMSTATE_UINT64(gpu_irq_enable, BCM2835ICState),
+ VMSTATE_UINT8(arm_irq_level, BCM2835ICState),
+ VMSTATE_UINT8(arm_irq_enable, BCM2835ICState),
+ VMSTATE_BOOL(fiq_enable, BCM2835ICState),
+ VMSTATE_UINT8(fiq_select, BCM2835ICState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void bcm2835_ic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = bcm2835_ic_reset;
+ dc->vmsd = &vmstate_bcm2835_ic;
+}
+
+static TypeInfo bcm2835_ic_info = {
+ .name = TYPE_BCM2835_IC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(BCM2835ICState),
+ .class_init = bcm2835_ic_class_init,
+ .instance_init = bcm2835_ic_init,
+};
+
+static void bcm2835_ic_register_types(void)
+{
+ type_register_static(&bcm2835_ic_info);
+}
+
+type_init(bcm2835_ic_register_types)
diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c
new file mode 100644
index 000000000..2ead76ffd
--- /dev/null
+++ b/hw/intc/bcm2836_control.c
@@ -0,0 +1,408 @@
+/*
+ * Raspberry Pi 2 emulation ARM control logic module.
+ * Copyright (c) 2015, Microsoft
+ * Written by Andrew Baumann
+ *
+ * Based on bcm2835_ic.c (Raspberry Pi emulation) (c) 2012 Gregory Estrade
+ *
+ * At present, this only implements interrupt routing and mailboxes (i.e.,
+ * not the PMU interrupt or the AXI counters).
+ *
+ * ARM Local Timer IRQ Copyright (c) 2019. Zoltán Baldaszti
+ *
+ * Ref:
+ * https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2836/QA7_rev3.4.pdf
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/bcm2836_control.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+#define REG_GPU_ROUTE 0x0c
+#define REG_LOCALTIMERROUTING 0x24
+#define REG_LOCALTIMERCONTROL 0x34
+#define REG_LOCALTIMERACK 0x38
+#define REG_TIMERCONTROL 0x40
+#define REG_MBOXCONTROL 0x50
+#define REG_IRQSRC 0x60
+#define REG_FIQSRC 0x70
+#define REG_MBOX0_WR 0x80
+#define REG_MBOX0_RDCLR 0xc0
+#define REG_LIMIT 0x100
+
+#define IRQ_BIT(cntrl, num) (((cntrl) & (1 << (num))) != 0)
+#define FIQ_BIT(cntrl, num) (((cntrl) & (1 << ((num) + 4))) != 0)
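+
+/*
+ * Register layout as implemented by the read/write handlers below: the
+ * per-core timer and mailbox interrupt control registers are banked at
+ * REG_TIMERCONTROL + 4 * core and REG_MBOXCONTROL + 4 * core, the IRQ and
+ * FIQ source registers at REG_IRQSRC + 4 * core and REG_FIQSRC + 4 * core,
+ * and each core's four mailboxes are write-set at REG_MBOX0_WR + 16 * core
+ * + 4 * n and read/write-clear at REG_MBOX0_RDCLR + 16 * core + 4 * n.
+ * In the control registers, bits [3:0] route a source to IRQ and bits [7:4]
+ * route it to FIQ; deliver_local() gives FIQ precedence when both are set.
+ */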
+
+#define IRQ_CNTPSIRQ 0
+#define IRQ_CNTPNSIRQ 1
+#define IRQ_CNTHPIRQ 2
+#define IRQ_CNTVIRQ 3
+#define IRQ_MAILBOX0 4
+#define IRQ_MAILBOX1 5
+#define IRQ_MAILBOX2 6
+#define IRQ_MAILBOX3 7
+#define IRQ_GPU 8
+#define IRQ_PMU 9
+#define IRQ_AXI 10
+#define IRQ_TIMER 11
+#define IRQ_MAX IRQ_TIMER
+
+#define LOCALTIMER_FREQ 38400000
+#define LOCALTIMER_INTFLAG (1 << 31)
+#define LOCALTIMER_RELOAD (1 << 30)
+#define LOCALTIMER_INTENABLE (1 << 29)
+#define LOCALTIMER_ENABLE (1 << 28)
+#define LOCALTIMER_VALUE(x) ((x) & 0xfffffff)
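+
+/*
+ * The local timer reload value occupies bits [27:0] and counts at
+ * LOCALTIMER_FREQ (38.4 MHz), so e.g. a reload value of 38400000 gives a
+ * period of roughly one second; see the muldiv64() computation in
+ * bcm2836_control_local_timer_set_next().
+ */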
+
+static void deliver_local(BCM2836ControlState *s, uint8_t core, uint8_t irq,
+ uint32_t controlreg, uint8_t controlidx)
+{
+ if (FIQ_BIT(controlreg, controlidx)) {
+ /* deliver a FIQ */
+ s->fiqsrc[core] |= (uint32_t)1 << irq;
+ } else if (IRQ_BIT(controlreg, controlidx)) {
+ /* deliver an IRQ */
+ s->irqsrc[core] |= (uint32_t)1 << irq;
+ } else {
+ /* the interrupt is masked */
+ }
+}
+
+/* Update interrupts. */
+static void bcm2836_control_update(BCM2836ControlState *s)
+{
+ int i, j;
+
+ /* reset pending IRQs/FIQs */
+ for (i = 0; i < BCM2836_NCORES; i++) {
+ s->irqsrc[i] = s->fiqsrc[i] = 0;
+ }
+
+ /* apply routing logic, update status regs */
+ if (s->gpu_irq) {
+ assert(s->route_gpu_irq < BCM2836_NCORES);
+ s->irqsrc[s->route_gpu_irq] |= (uint32_t)1 << IRQ_GPU;
+ }
+
+ if (s->gpu_fiq) {
+ assert(s->route_gpu_fiq < BCM2836_NCORES);
+ s->fiqsrc[s->route_gpu_fiq] |= (uint32_t)1 << IRQ_GPU;
+ }
+
+ /*
+ * handle the control module 'local timer' interrupt for one of the
+ * cores' IRQ/FIQ; this is distinct from the per-CPU timer
+ * interrupts handled below.
+ */
+ if ((s->local_timer_control & LOCALTIMER_INTENABLE) &&
+ (s->local_timer_control & LOCALTIMER_INTFLAG)) {
+ if (s->route_localtimer & 4) {
+ s->fiqsrc[(s->route_localtimer & 3)] |= (uint32_t)1 << IRQ_TIMER;
+ } else {
+ s->irqsrc[(s->route_localtimer & 3)] |= (uint32_t)1 << IRQ_TIMER;
+ }
+ }
+
+ for (i = 0; i < BCM2836_NCORES; i++) {
+ /* handle local timer interrupts for this core */
+ if (s->timerirqs[i]) {
+ assert(s->timerirqs[i] < (1 << (IRQ_CNTVIRQ + 1))); /* sane mask? */
+ for (j = 0; j <= IRQ_CNTVIRQ; j++) {
+ if ((s->timerirqs[i] & (1 << j)) != 0) {
+ /* local interrupt j is set */
+ deliver_local(s, i, j, s->timercontrol[i], j);
+ }
+ }
+ }
+
+ /* handle mailboxes for this core */
+ for (j = 0; j < BCM2836_MBPERCORE; j++) {
+ if (s->mailboxes[i * BCM2836_MBPERCORE + j] != 0) {
+ /* mailbox j is set */
+ deliver_local(s, i, j + IRQ_MAILBOX0, s->mailboxcontrol[i], j);
+ }
+ }
+ }
+
+ /* call set_irq appropriately for each output */
+ for (i = 0; i < BCM2836_NCORES; i++) {
+ qemu_set_irq(s->irq[i], s->irqsrc[i] != 0);
+ qemu_set_irq(s->fiq[i], s->fiqsrc[i] != 0);
+ }
+}
+
+static void bcm2836_control_set_local_irq(void *opaque, int core, int local_irq,
+ int level)
+{
+ BCM2836ControlState *s = opaque;
+
+ assert(core >= 0 && core < BCM2836_NCORES);
+ assert(local_irq >= 0 && local_irq <= IRQ_CNTVIRQ);
+
+ s->timerirqs[core] = deposit32(s->timerirqs[core], local_irq, 1, !!level);
+
+ bcm2836_control_update(s);
+}
+
+/* XXX: the following wrapper functions are a kludgy workaround,
+ * needed because I can't seem to pass useful information in the "irq"
+ * parameter when using named interrupts. Feel free to clean this up!
+ */
+
+static void bcm2836_control_set_local_irq0(void *opaque, int core, int level)
+{
+ bcm2836_control_set_local_irq(opaque, core, IRQ_CNTPSIRQ, level);
+}
+
+static void bcm2836_control_set_local_irq1(void *opaque, int core, int level)
+{
+ bcm2836_control_set_local_irq(opaque, core, IRQ_CNTPNSIRQ, level);
+}
+
+static void bcm2836_control_set_local_irq2(void *opaque, int core, int level)
+{
+ bcm2836_control_set_local_irq(opaque, core, IRQ_CNTHPIRQ, level);
+}
+
+static void bcm2836_control_set_local_irq3(void *opaque, int core, int level)
+{
+ bcm2836_control_set_local_irq(opaque, core, IRQ_CNTVIRQ, level);
+}
+
+static void bcm2836_control_set_gpu_irq(void *opaque, int irq, int level)
+{
+ BCM2836ControlState *s = opaque;
+
+ s->gpu_irq = level;
+
+ bcm2836_control_update(s);
+}
+
+static void bcm2836_control_set_gpu_fiq(void *opaque, int irq, int level)
+{
+ BCM2836ControlState *s = opaque;
+
+ s->gpu_fiq = level;
+
+ bcm2836_control_update(s);
+}
+
+static void bcm2836_control_local_timer_set_next(void *opaque)
+{
+ BCM2836ControlState *s = opaque;
+ uint64_t next_event;
+
+ assert(LOCALTIMER_VALUE(s->local_timer_control) > 0);
+
+ next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ muldiv64(LOCALTIMER_VALUE(s->local_timer_control),
+ NANOSECONDS_PER_SECOND, LOCALTIMER_FREQ);
+ timer_mod(&s->timer, next_event);
+}
+
+static void bcm2836_control_local_timer_tick(void *opaque)
+{
+ BCM2836ControlState *s = opaque;
+
+ bcm2836_control_local_timer_set_next(s);
+
+ s->local_timer_control |= LOCALTIMER_INTFLAG;
+ bcm2836_control_update(s);
+}
+
+static void bcm2836_control_local_timer_control(void *opaque, uint32_t val)
+{
+ BCM2836ControlState *s = opaque;
+
+ s->local_timer_control = val;
+ if (val & LOCALTIMER_ENABLE) {
+ bcm2836_control_local_timer_set_next(s);
+ } else {
+ timer_del(&s->timer);
+ }
+}
+
+static void bcm2836_control_local_timer_ack(void *opaque, uint32_t val)
+{
+ BCM2836ControlState *s = opaque;
+
+ if (val & LOCALTIMER_INTFLAG) {
+ s->local_timer_control &= ~LOCALTIMER_INTFLAG;
+ }
+ if ((val & LOCALTIMER_RELOAD) &&
+ (s->local_timer_control & LOCALTIMER_ENABLE)) {
+ bcm2836_control_local_timer_set_next(s);
+ }
+}
+
+static uint64_t bcm2836_control_read(void *opaque, hwaddr offset, unsigned size)
+{
+ BCM2836ControlState *s = opaque;
+
+ if (offset == REG_GPU_ROUTE) {
+ assert(s->route_gpu_fiq < BCM2836_NCORES
+ && s->route_gpu_irq < BCM2836_NCORES);
+ return ((uint32_t)s->route_gpu_fiq << 2) | s->route_gpu_irq;
+ } else if (offset == REG_LOCALTIMERROUTING) {
+ return s->route_localtimer;
+ } else if (offset == REG_LOCALTIMERCONTROL) {
+ return s->local_timer_control;
+ } else if (offset == REG_LOCALTIMERACK) {
+ return 0;
+ } else if (offset >= REG_TIMERCONTROL && offset < REG_MBOXCONTROL) {
+ return s->timercontrol[(offset - REG_TIMERCONTROL) >> 2];
+ } else if (offset >= REG_MBOXCONTROL && offset < REG_IRQSRC) {
+ return s->mailboxcontrol[(offset - REG_MBOXCONTROL) >> 2];
+ } else if (offset >= REG_IRQSRC && offset < REG_FIQSRC) {
+ return s->irqsrc[(offset - REG_IRQSRC) >> 2];
+ } else if (offset >= REG_FIQSRC && offset < REG_MBOX0_WR) {
+ return s->fiqsrc[(offset - REG_FIQSRC) >> 2];
+ } else if (offset >= REG_MBOX0_RDCLR && offset < REG_LIMIT) {
+ return s->mailboxes[(offset - REG_MBOX0_RDCLR) >> 2];
+ } else {
+ qemu_log_mask(LOG_UNIMP, "%s: Unsupported offset 0x%"HWADDR_PRIx"\n",
+ __func__, offset);
+ return 0;
+ }
+}
+
+static void bcm2836_control_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ BCM2836ControlState *s = opaque;
+
+ if (offset == REG_GPU_ROUTE) {
+ s->route_gpu_irq = val & 0x3;
+ s->route_gpu_fiq = (val >> 2) & 0x3;
+ } else if (offset == REG_LOCALTIMERROUTING) {
+ s->route_localtimer = val & 7;
+ } else if (offset == REG_LOCALTIMERCONTROL) {
+ bcm2836_control_local_timer_control(s, val);
+ } else if (offset == REG_LOCALTIMERACK) {
+ bcm2836_control_local_timer_ack(s, val);
+ } else if (offset >= REG_TIMERCONTROL && offset < REG_MBOXCONTROL) {
+ s->timercontrol[(offset - REG_TIMERCONTROL) >> 2] = val & 0xff;
+ } else if (offset >= REG_MBOXCONTROL && offset < REG_IRQSRC) {
+ s->mailboxcontrol[(offset - REG_MBOXCONTROL) >> 2] = val & 0xff;
+ } else if (offset >= REG_MBOX0_WR && offset < REG_MBOX0_RDCLR) {
+ s->mailboxes[(offset - REG_MBOX0_WR) >> 2] |= val;
+ } else if (offset >= REG_MBOX0_RDCLR && offset < REG_LIMIT) {
+ s->mailboxes[(offset - REG_MBOX0_RDCLR) >> 2] &= ~val;
+ } else {
+ qemu_log_mask(LOG_UNIMP, "%s: Unsupported offset 0x%"HWADDR_PRIx
+ " value 0x%"PRIx64"\n",
+ __func__, offset, val);
+ return;
+ }
+
+ bcm2836_control_update(s);
+}
+
+static const MemoryRegionOps bcm2836_control_ops = {
+ .read = bcm2836_control_read,
+ .write = bcm2836_control_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static void bcm2836_control_reset(DeviceState *d)
+{
+ BCM2836ControlState *s = BCM2836_CONTROL(d);
+ int i;
+
+ s->route_gpu_irq = s->route_gpu_fiq = 0;
+
+ timer_del(&s->timer);
+ s->route_localtimer = 0;
+ s->local_timer_control = 0;
+
+ for (i = 0; i < BCM2836_NCORES; i++) {
+ s->timercontrol[i] = 0;
+ s->mailboxcontrol[i] = 0;
+ }
+
+ for (i = 0; i < BCM2836_NCORES * BCM2836_MBPERCORE; i++) {
+ s->mailboxes[i] = 0;
+ }
+}
+
+static void bcm2836_control_init(Object *obj)
+{
+ BCM2836ControlState *s = BCM2836_CONTROL(obj);
+ DeviceState *dev = DEVICE(obj);
+
+ memory_region_init_io(&s->iomem, obj, &bcm2836_control_ops, s,
+ TYPE_BCM2836_CONTROL, REG_LIMIT);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+
+ /* inputs from each CPU core */
+ qdev_init_gpio_in_named(dev, bcm2836_control_set_local_irq0, "cntpsirq",
+ BCM2836_NCORES);
+ qdev_init_gpio_in_named(dev, bcm2836_control_set_local_irq1, "cntpnsirq",
+ BCM2836_NCORES);
+ qdev_init_gpio_in_named(dev, bcm2836_control_set_local_irq2, "cnthpirq",
+ BCM2836_NCORES);
+ qdev_init_gpio_in_named(dev, bcm2836_control_set_local_irq3, "cntvirq",
+ BCM2836_NCORES);
+
+ /* IRQ and FIQ inputs from upstream bcm2835 controller */
+ qdev_init_gpio_in_named(dev, bcm2836_control_set_gpu_irq, "gpu-irq", 1);
+ qdev_init_gpio_in_named(dev, bcm2836_control_set_gpu_fiq, "gpu-fiq", 1);
+
+ /* outputs to CPU cores */
+ qdev_init_gpio_out_named(dev, s->irq, "irq", BCM2836_NCORES);
+ qdev_init_gpio_out_named(dev, s->fiq, "fiq", BCM2836_NCORES);
+
+ /* create a qemu virtual timer */
+ timer_init_ns(&s->timer, QEMU_CLOCK_VIRTUAL,
+ bcm2836_control_local_timer_tick, s);
+}
+
+static const VMStateDescription vmstate_bcm2836_control = {
+ .name = TYPE_BCM2836_CONTROL,
+ .version_id = 2,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(mailboxes, BCM2836ControlState,
+ BCM2836_NCORES * BCM2836_MBPERCORE),
+ VMSTATE_UINT8(route_gpu_irq, BCM2836ControlState),
+ VMSTATE_UINT8(route_gpu_fiq, BCM2836ControlState),
+ VMSTATE_UINT32_ARRAY(timercontrol, BCM2836ControlState, BCM2836_NCORES),
+ VMSTATE_UINT32_ARRAY(mailboxcontrol, BCM2836ControlState,
+ BCM2836_NCORES),
+ VMSTATE_TIMER_V(timer, BCM2836ControlState, 2),
+ VMSTATE_UINT32_V(local_timer_control, BCM2836ControlState, 2),
+ VMSTATE_UINT8_V(route_localtimer, BCM2836ControlState, 2),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void bcm2836_control_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = bcm2836_control_reset;
+ dc->vmsd = &vmstate_bcm2836_control;
+}
+
+static TypeInfo bcm2836_control_info = {
+ .name = TYPE_BCM2836_CONTROL,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(BCM2836ControlState),
+ .class_init = bcm2836_control_class_init,
+ .instance_init = bcm2836_control_init,
+};
+
+static void bcm2836_control_register_types(void)
+{
+ type_register_static(&bcm2836_control_info);
+}
+
+type_init(bcm2836_control_register_types)
diff --git a/hw/intc/etraxfs_pic.c b/hw/intc/etraxfs_pic.c
new file mode 100644
index 000000000..bd37d1cca
--- /dev/null
+++ b/hw/intc/etraxfs_pic.c
@@ -0,0 +1,172 @@
+/*
+ * QEMU ETRAX Interrupt Controller.
+ *
+ * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "qemu/module.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "qom/object.h"
+
+#define D(x)
+
+#define R_RW_MASK 0
+#define R_R_VECT 1
+#define R_R_MASKED_VECT 2
+#define R_R_NMI 3
+#define R_R_GURU 4
+#define R_MAX 5
+
+#define TYPE_ETRAX_FS_PIC "etraxfs-pic"
+DECLARE_INSTANCE_CHECKER(struct etrax_pic, ETRAX_FS_PIC,
+ TYPE_ETRAX_FS_PIC)
+
+struct etrax_pic
+{
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+ qemu_irq parent_irq;
+ qemu_irq parent_nmi;
+ uint32_t regs[R_MAX];
+};
+
+static void pic_update(struct etrax_pic *fs)
+{
+ uint32_t vector = 0;
+ int i;
+
+ fs->regs[R_R_MASKED_VECT] = fs->regs[R_R_VECT] & fs->regs[R_RW_MASK];
+
+    /* The ETRAX interrupt controller signals interrupts to the core
+       through an interrupt request wire and an IRQ vector bus. If
+       multiple interrupts are active simultaneously, it signals vector
+       0x30 and lets software choose among the priorities.  */
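+    /*
+     * Example: with only bit 3 of the masked vector set, vector 0x34 is
+     * signalled; with bits 3 and 7 both set, the shared vector 0x30 is
+     * signalled instead and software sorts out the priorities.
+     */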
+ if (fs->regs[R_R_MASKED_VECT]) {
+ uint32_t mv = fs->regs[R_R_MASKED_VECT];
+ for (i = 0; i < 31; i++) {
+ if (mv & 1) {
+ vector = 0x31 + i;
+ /* Check for multiple interrupts. */
+ if (mv > 1)
+ vector = 0x30;
+ break;
+ }
+ mv >>= 1;
+ }
+ }
+
+ qemu_set_irq(fs->parent_irq, vector);
+}
+
+static uint64_t
+pic_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ struct etrax_pic *fs = opaque;
+ uint32_t rval;
+
+ rval = fs->regs[addr >> 2];
+ D(printf("%s %x=%x\n", __func__, addr, rval));
+ return rval;
+}
+
+static void pic_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned int size)
+{
+ struct etrax_pic *fs = opaque;
+ D(printf("%s addr=%x val=%x\n", __func__, addr, value));
+
+ if (addr == R_RW_MASK) {
+ fs->regs[R_RW_MASK] = value;
+ pic_update(fs);
+ }
+}
+
+static const MemoryRegionOps pic_ops = {
+ .read = pic_read,
+ .write = pic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4
+ }
+};
+
+static void nmi_handler(void *opaque, int irq, int level)
+{
+ struct etrax_pic *fs = (void *)opaque;
+ uint32_t mask;
+
+ mask = 1 << irq;
+ if (level)
+ fs->regs[R_R_NMI] |= mask;
+ else
+ fs->regs[R_R_NMI] &= ~mask;
+
+ qemu_set_irq(fs->parent_nmi, !!fs->regs[R_R_NMI]);
+}
+
+static void irq_handler(void *opaque, int irq, int level)
+{
+ struct etrax_pic *fs = (void *)opaque;
+
+ if (irq >= 30) {
+ nmi_handler(opaque, irq, level);
+ return;
+ }
+
+ irq -= 1;
+ fs->regs[R_R_VECT] &= ~(1 << irq);
+ fs->regs[R_R_VECT] |= (!!level << irq);
+ pic_update(fs);
+}
+
+static void etraxfs_pic_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ struct etrax_pic *s = ETRAX_FS_PIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ qdev_init_gpio_in(dev, irq_handler, 32);
+ sysbus_init_irq(sbd, &s->parent_irq);
+ sysbus_init_irq(sbd, &s->parent_nmi);
+
+ memory_region_init_io(&s->mmio, obj, &pic_ops, s,
+ "etraxfs-pic", R_MAX * 4);
+ sysbus_init_mmio(sbd, &s->mmio);
+}
+
+static const TypeInfo etraxfs_pic_info = {
+ .name = TYPE_ETRAX_FS_PIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(struct etrax_pic),
+ .instance_init = etraxfs_pic_init,
+};
+
+static void etraxfs_pic_register_types(void)
+{
+ type_register_static(&etraxfs_pic_info);
+}
+
+type_init(etraxfs_pic_register_types)
diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c
new file mode 100644
index 000000000..4534ee248
--- /dev/null
+++ b/hw/intc/exynos4210_combiner.c
@@ -0,0 +1,461 @@
+/*
+ * Samsung exynos4210 Interrupt Combiner
+ *
+ * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd.
+ * All rights reserved.
+ *
+ * Evgeny Voevodin <e.voevodin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * The Exynos4210 combiner acts as an OR gate for the SoC's IRQ lines. It
+ * combines IRQ sources into groups and provides a signal output to the GIC
+ * for each group. It is driven by common mask and enable/disable logic.
+ * Note that not all IRQs are passed to the GIC through the combiner.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+
+#include "hw/arm/exynos4210.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "qom/object.h"
+
+//#define DEBUG_COMBINER
+
+#ifdef DEBUG_COMBINER
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stdout, "COMBINER: [%s:%d] " fmt, __func__ , __LINE__, \
+ ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) do {} while (0)
+#endif
+
+#define IIC_NGRP 64             /* number of internal combiner groups */
+#define IIC_NIRQ (IIC_NGRP * 8) /* number of internal combiner interrupts */
+#define IIC_REGION_SIZE 0x108 /* Size of memory mapped region */
+#define IIC_REGSET_SIZE 0x41
+
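+/*
+ * Register map as implemented by the read/write handlers below: groups are
+ * organised in quads, and each 0x10-byte quad holds the enable-set (IIESR),
+ * enable-clear (IIECR), raw status (IISTR) and masked status (IIMSR)
+ * registers for four consecutive groups, followed by the two combiner
+ * interrupt pending status registers (icipsr) at the top of the
+ * 0x108-byte region.
+ */
+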
+/*
+ * State for each output signal of internal combiner
+ */
+typedef struct CombinerGroupState {
+ uint8_t src_mask; /* 1 - source enabled, 0 - disabled */
+ uint8_t src_pending; /* Pending source interrupts before masking */
+} CombinerGroupState;
+
+#define TYPE_EXYNOS4210_COMBINER "exynos4210.combiner"
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210CombinerState, EXYNOS4210_COMBINER)
+
+struct Exynos4210CombinerState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+
+ struct CombinerGroupState group[IIC_NGRP];
+ uint32_t reg_set[IIC_REGSET_SIZE];
+ uint32_t icipsr[2];
+ uint32_t external; /* 1 means that this combiner is external */
+
+ qemu_irq output_irq[IIC_NGRP];
+};
+
+static const VMStateDescription vmstate_exynos4210_combiner_group_state = {
+ .name = "exynos4210.combiner.groupstate",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(src_mask, CombinerGroupState),
+ VMSTATE_UINT8(src_pending, CombinerGroupState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_exynos4210_combiner = {
+ .name = "exynos4210.combiner",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(group, Exynos4210CombinerState, IIC_NGRP, 0,
+ vmstate_exynos4210_combiner_group_state, CombinerGroupState),
+ VMSTATE_UINT32_ARRAY(reg_set, Exynos4210CombinerState,
+ IIC_REGSET_SIZE),
+ VMSTATE_UINT32_ARRAY(icipsr, Exynos4210CombinerState, 2),
+ VMSTATE_UINT32(external, Exynos4210CombinerState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/*
+ * Get Combiner input GPIO into irqs structure
+ */
+void exynos4210_combiner_get_gpioin(Exynos4210Irq *irqs, DeviceState *dev,
+ int ext)
+{
+ int n;
+ int bit;
+ int max;
+ qemu_irq *irq;
+
+ max = ext ? EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ :
+ EXYNOS4210_MAX_INT_COMBINER_IN_IRQ;
+ irq = ext ? irqs->ext_combiner_irq : irqs->int_combiner_irq;
+
+    /*
+     * Some IRQs of the internal/external combiner go to two combiner
+     * groups, so split them here.
+     */
+ for (n = 0; n < max; n++) {
+
+ bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n);
+
+ switch (n) {
+ /* MDNIE_LCD1 INTG1 */
+ case EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 0) ...
+ EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 3):
+ irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n),
+ irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(0, bit + 4)]);
+ continue;
+
+ /* TMU INTG3 */
+ case EXYNOS4210_COMBINER_GET_IRQ_NUM(3, 4):
+ irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n),
+ irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(2, bit)]);
+ continue;
+
+ /* LCD1 INTG12 */
+ case EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 0) ...
+ EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 3):
+ irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n),
+ irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(11, bit + 4)]);
+ continue;
+
+ /* Multi-Core Timer INTG12 */
+ case EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4) ...
+ EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 8):
+ irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n),
+ irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]);
+ continue;
+
+ /* Multi-Core Timer INTG35 */
+ case EXYNOS4210_COMBINER_GET_IRQ_NUM(35, 4) ...
+ EXYNOS4210_COMBINER_GET_IRQ_NUM(35, 8):
+ irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n),
+ irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]);
+ continue;
+
+ /* Multi-Core Timer INTG51 */
+ case EXYNOS4210_COMBINER_GET_IRQ_NUM(51, 4) ...
+ EXYNOS4210_COMBINER_GET_IRQ_NUM(51, 8):
+ irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n),
+ irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]);
+ continue;
+
+ /* Multi-Core Timer INTG53 */
+ case EXYNOS4210_COMBINER_GET_IRQ_NUM(53, 4) ...
+ EXYNOS4210_COMBINER_GET_IRQ_NUM(53, 8):
+ irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n),
+ irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]);
+ continue;
+ }
+
+ irq[n] = qdev_get_gpio_in(dev, n);
+ }
+}
+
+static uint64_t
+exynos4210_combiner_read(void *opaque, hwaddr offset, unsigned size)
+{
+ struct Exynos4210CombinerState *s =
+ (struct Exynos4210CombinerState *)opaque;
+    uint32_t req_quad_base_n; /* base of the register quad; multiply by 4
+                                 to get the corresponding group quad */
+ uint32_t grp_quad_base_n; /* Base of group quad */
+ uint32_t reg_n; /* Register number inside the quad */
+ uint32_t val;
+
+ req_quad_base_n = offset >> 4;
+ grp_quad_base_n = req_quad_base_n << 2;
+ reg_n = (offset - (req_quad_base_n << 4)) >> 2;
+
+ if (req_quad_base_n >= IIC_NGRP) {
+ /* Read of ICIPSR register */
+ return s->icipsr[reg_n];
+ }
+
+ val = 0;
+
+ switch (reg_n) {
+ /* IISTR */
+ case 2:
+ val |= s->group[grp_quad_base_n].src_pending;
+ val |= s->group[grp_quad_base_n + 1].src_pending << 8;
+ val |= s->group[grp_quad_base_n + 2].src_pending << 16;
+ val |= s->group[grp_quad_base_n + 3].src_pending << 24;
+ break;
+ /* IIMSR */
+ case 3:
+ val |= s->group[grp_quad_base_n].src_mask &
+ s->group[grp_quad_base_n].src_pending;
+ val |= (s->group[grp_quad_base_n + 1].src_mask &
+ s->group[grp_quad_base_n + 1].src_pending) << 8;
+ val |= (s->group[grp_quad_base_n + 2].src_mask &
+ s->group[grp_quad_base_n + 2].src_pending) << 16;
+ val |= (s->group[grp_quad_base_n + 3].src_mask &
+ s->group[grp_quad_base_n + 3].src_pending) << 24;
+ break;
+ default:
+ if (offset >> 2 >= IIC_REGSET_SIZE) {
+ hw_error("exynos4210.combiner: overflow of reg_set by 0x"
+                    TARGET_FMT_plx " offset\n", offset);
+ }
+ val = s->reg_set[offset >> 2];
+ }
+ return val;
+}
+
+static void exynos4210_combiner_update(void *opaque, uint8_t group_n)
+{
+ struct Exynos4210CombinerState *s =
+ (struct Exynos4210CombinerState *)opaque;
+
+ /* Send interrupt if needed */
+ if (s->group[group_n].src_mask & s->group[group_n].src_pending) {
+#ifdef DEBUG_COMBINER
+ if (group_n != 26) {
+ /* skip uart */
+ DPRINTF("%s raise IRQ[%d]\n", s->external ? "EXT" : "INT", group_n);
+ }
+#endif
+
+ /* Set Combiner interrupt pending status after masking */
+ if (group_n >= 32) {
+ s->icipsr[1] |= 1 << (group_n - 32);
+ } else {
+ s->icipsr[0] |= 1 << group_n;
+ }
+
+ qemu_irq_raise(s->output_irq[group_n]);
+ } else {
+#ifdef DEBUG_COMBINER
+ if (group_n != 26) {
+ /* skip uart */
+ DPRINTF("%s lower IRQ[%d]\n", s->external ? "EXT" : "INT", group_n);
+ }
+#endif
+
+ /* Set Combiner interrupt pending status after masking */
+ if (group_n >= 32) {
+ s->icipsr[1] &= ~(1 << (group_n - 32));
+ } else {
+ s->icipsr[0] &= ~(1 << group_n);
+ }
+
+ qemu_irq_lower(s->output_irq[group_n]);
+ }
+}
+
+static void exynos4210_combiner_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ struct Exynos4210CombinerState *s =
+ (struct Exynos4210CombinerState *)opaque;
+    uint32_t req_quad_base_n; /* base of the register quad; multiply by 4
+                                 to get the corresponding group quad */
+ uint32_t grp_quad_base_n; /* Base of group quad */
+ uint32_t reg_n; /* Register number inside the quad */
+
+ req_quad_base_n = offset >> 4;
+ grp_quad_base_n = req_quad_base_n << 2;
+ reg_n = (offset - (req_quad_base_n << 4)) >> 2;
+
+ if (req_quad_base_n >= IIC_NGRP) {
+ hw_error("exynos4210.combiner: unallowed write access at offset 0x"
+ TARGET_FMT_plx "\n", offset);
+ return;
+ }
+
+ if (reg_n > 1) {
+ hw_error("exynos4210.combiner: unallowed write access at offset 0x"
+ TARGET_FMT_plx "\n", offset);
+ return;
+ }
+
+ if (offset >> 2 >= IIC_REGSET_SIZE) {
+ hw_error("exynos4210.combiner: overflow of reg_set by 0x"
+                 TARGET_FMT_plx " offset\n", offset);
+ }
+ s->reg_set[offset >> 2] = val;
+
+ switch (reg_n) {
+ /* IIESR */
+ case 0:
+        /* FIXME: if an IRQ is already pending and allowed by the mask,
+         * enabling it again here will raise the interrupt again. */
+
+ DPRINTF("%s enable IRQ for groups %d, %d, %d, %d\n",
+ s->external ? "EXT" : "INT",
+ grp_quad_base_n,
+ grp_quad_base_n + 1,
+ grp_quad_base_n + 2,
+ grp_quad_base_n + 3);
+
+ /* Enable interrupt sources */
+ s->group[grp_quad_base_n].src_mask |= val & 0xFF;
+ s->group[grp_quad_base_n + 1].src_mask |= (val & 0xFF00) >> 8;
+ s->group[grp_quad_base_n + 2].src_mask |= (val & 0xFF0000) >> 16;
+ s->group[grp_quad_base_n + 3].src_mask |= (val & 0xFF000000) >> 24;
+
+ exynos4210_combiner_update(s, grp_quad_base_n);
+ exynos4210_combiner_update(s, grp_quad_base_n + 1);
+ exynos4210_combiner_update(s, grp_quad_base_n + 2);
+ exynos4210_combiner_update(s, grp_quad_base_n + 3);
+ break;
+ /* IIECR */
+ case 1:
+ DPRINTF("%s disable IRQ for groups %d, %d, %d, %d\n",
+ s->external ? "EXT" : "INT",
+ grp_quad_base_n,
+ grp_quad_base_n + 1,
+ grp_quad_base_n + 2,
+ grp_quad_base_n + 3);
+
+ /* Disable interrupt sources */
+ s->group[grp_quad_base_n].src_mask &= ~(val & 0xFF);
+ s->group[grp_quad_base_n + 1].src_mask &= ~((val & 0xFF00) >> 8);
+ s->group[grp_quad_base_n + 2].src_mask &= ~((val & 0xFF0000) >> 16);
+ s->group[grp_quad_base_n + 3].src_mask &= ~((val & 0xFF000000) >> 24);
+
+ exynos4210_combiner_update(s, grp_quad_base_n);
+ exynos4210_combiner_update(s, grp_quad_base_n + 1);
+ exynos4210_combiner_update(s, grp_quad_base_n + 2);
+ exynos4210_combiner_update(s, grp_quad_base_n + 3);
+ break;
+ default:
+ hw_error("exynos4210.combiner: unallowed write access at offset 0x"
+ TARGET_FMT_plx "\n", offset);
+ break;
+ }
+}
+
+/* Get combiner group and bit from irq number */
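+/* e.g. IRQ 53 maps to group 6, bit 5 (group = irq / 8, bit = irq % 8) */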
+static uint8_t get_combiner_group_and_bit(int irq, uint8_t *bit)
+{
+ *bit = irq - ((irq >> 3) << 3);
+ return irq >> 3;
+}
+
+/* Process a change in an external IRQ input. */
+static void exynos4210_combiner_handler(void *opaque, int irq, int level)
+{
+ struct Exynos4210CombinerState *s =
+ (struct Exynos4210CombinerState *)opaque;
+ uint8_t bit_n, group_n;
+
+ group_n = get_combiner_group_and_bit(irq, &bit_n);
+
+ if (s->external && group_n >= EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ) {
+ DPRINTF("%s unallowed IRQ group 0x%x\n", s->external ? "EXT" : "INT"
+ , group_n);
+ return;
+ }
+
+ if (level) {
+ s->group[group_n].src_pending |= 1 << bit_n;
+ } else {
+ s->group[group_n].src_pending &= ~(1 << bit_n);
+ }
+
+ exynos4210_combiner_update(s, group_n);
+}
+
+static void exynos4210_combiner_reset(DeviceState *d)
+{
+ struct Exynos4210CombinerState *s = (struct Exynos4210CombinerState *)d;
+
+ memset(&s->group, 0, sizeof(s->group));
+ memset(&s->reg_set, 0, sizeof(s->reg_set));
+
+ s->reg_set[0xC0 >> 2] = 0x01010101;
+ s->reg_set[0xC4 >> 2] = 0x01010101;
+ s->reg_set[0xD0 >> 2] = 0x01010101;
+ s->reg_set[0xD4 >> 2] = 0x01010101;
+}
+
+static const MemoryRegionOps exynos4210_combiner_ops = {
+ .read = exynos4210_combiner_read,
+ .write = exynos4210_combiner_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/*
+ * Internal Combiner initialization.
+ */
+static void exynos4210_combiner_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ Exynos4210CombinerState *s = EXYNOS4210_COMBINER(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ unsigned int i;
+
+ /* Allocate general purpose input signals and connect a handler to each of
+ * them */
+ qdev_init_gpio_in(dev, exynos4210_combiner_handler, IIC_NIRQ);
+
+ /* Connect SysBusDev irqs to device specific irqs */
+ for (i = 0; i < IIC_NGRP; i++) {
+ sysbus_init_irq(sbd, &s->output_irq[i]);
+ }
+
+ memory_region_init_io(&s->iomem, obj, &exynos4210_combiner_ops, s,
+ "exynos4210-combiner", IIC_REGION_SIZE);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static Property exynos4210_combiner_properties[] = {
+ DEFINE_PROP_UINT32("external", Exynos4210CombinerState, external, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void exynos4210_combiner_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = exynos4210_combiner_reset;
+ device_class_set_props(dc, exynos4210_combiner_properties);
+ dc->vmsd = &vmstate_exynos4210_combiner;
+}
+
+static const TypeInfo exynos4210_combiner_info = {
+ .name = TYPE_EXYNOS4210_COMBINER,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Exynos4210CombinerState),
+ .instance_init = exynos4210_combiner_init,
+ .class_init = exynos4210_combiner_class_init,
+};
+
+static void exynos4210_combiner_register_types(void)
+{
+ type_register_static(&exynos4210_combiner_info);
+}
+
+type_init(exynos4210_combiner_register_types)
diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c
new file mode 100644
index 000000000..bc73d1f11
--- /dev/null
+++ b/hw/intc/exynos4210_gic.c
@@ -0,0 +1,482 @@
+/*
+ * Samsung exynos4210 GIC implementation. Based on hw/arm_gic.c
+ *
+ * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd.
+ * All rights reserved.
+ *
+ * Evgeny Voevodin <e.voevodin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/arm/exynos4210.h"
+#include "qom/object.h"
+
+enum ExtGicId {
+ EXT_GIC_ID_MDMA_LCD0 = 66,
+ EXT_GIC_ID_PDMA0,
+ EXT_GIC_ID_PDMA1,
+ EXT_GIC_ID_TIMER0,
+ EXT_GIC_ID_TIMER1,
+ EXT_GIC_ID_TIMER2,
+ EXT_GIC_ID_TIMER3,
+ EXT_GIC_ID_TIMER4,
+ EXT_GIC_ID_MCT_L0,
+ EXT_GIC_ID_WDT,
+ EXT_GIC_ID_RTC_ALARM,
+ EXT_GIC_ID_RTC_TIC,
+ EXT_GIC_ID_GPIO_XB,
+ EXT_GIC_ID_GPIO_XA,
+ EXT_GIC_ID_MCT_L1,
+ EXT_GIC_ID_IEM_APC,
+ EXT_GIC_ID_IEM_IEC,
+ EXT_GIC_ID_NFC,
+ EXT_GIC_ID_UART0,
+ EXT_GIC_ID_UART1,
+ EXT_GIC_ID_UART2,
+ EXT_GIC_ID_UART3,
+ EXT_GIC_ID_UART4,
+ EXT_GIC_ID_MCT_G0,
+ EXT_GIC_ID_I2C0,
+ EXT_GIC_ID_I2C1,
+ EXT_GIC_ID_I2C2,
+ EXT_GIC_ID_I2C3,
+ EXT_GIC_ID_I2C4,
+ EXT_GIC_ID_I2C5,
+ EXT_GIC_ID_I2C6,
+ EXT_GIC_ID_I2C7,
+ EXT_GIC_ID_SPI0,
+ EXT_GIC_ID_SPI1,
+ EXT_GIC_ID_SPI2,
+ EXT_GIC_ID_MCT_G1,
+ EXT_GIC_ID_USB_HOST,
+ EXT_GIC_ID_USB_DEVICE,
+ EXT_GIC_ID_MODEMIF,
+ EXT_GIC_ID_HSMMC0,
+ EXT_GIC_ID_HSMMC1,
+ EXT_GIC_ID_HSMMC2,
+ EXT_GIC_ID_HSMMC3,
+ EXT_GIC_ID_SDMMC,
+ EXT_GIC_ID_MIPI_CSI_4LANE,
+ EXT_GIC_ID_MIPI_DSI_4LANE,
+ EXT_GIC_ID_MIPI_CSI_2LANE,
+ EXT_GIC_ID_MIPI_DSI_2LANE,
+ EXT_GIC_ID_ONENAND_AUDI,
+ EXT_GIC_ID_ROTATOR,
+ EXT_GIC_ID_FIMC0,
+ EXT_GIC_ID_FIMC1,
+ EXT_GIC_ID_FIMC2,
+ EXT_GIC_ID_FIMC3,
+ EXT_GIC_ID_JPEG,
+ EXT_GIC_ID_2D,
+ EXT_GIC_ID_PCIe,
+ EXT_GIC_ID_MIXER,
+ EXT_GIC_ID_HDMI,
+ EXT_GIC_ID_HDMI_I2C,
+ EXT_GIC_ID_MFC,
+ EXT_GIC_ID_TVENC,
+};
+
+enum ExtInt {
+ EXT_GIC_ID_EXTINT0 = 48,
+ EXT_GIC_ID_EXTINT1,
+ EXT_GIC_ID_EXTINT2,
+ EXT_GIC_ID_EXTINT3,
+ EXT_GIC_ID_EXTINT4,
+ EXT_GIC_ID_EXTINT5,
+ EXT_GIC_ID_EXTINT6,
+ EXT_GIC_ID_EXTINT7,
+ EXT_GIC_ID_EXTINT8,
+ EXT_GIC_ID_EXTINT9,
+ EXT_GIC_ID_EXTINT10,
+ EXT_GIC_ID_EXTINT11,
+ EXT_GIC_ID_EXTINT12,
+ EXT_GIC_ID_EXTINT13,
+ EXT_GIC_ID_EXTINT14,
+ EXT_GIC_ID_EXTINT15
+};
+
+/*
+ * External GIC sources which do not come from the external interrupt
+ * combiner or the external interrupts start at
+ * EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ, which is INTG16 in the internal
+ * interrupt combiner.
+ */
+
+static const uint32_t
+combiner_grp_to_gic_id[64-EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][8] = {
+ /* int combiner groups 16-19 */
+ { }, { }, { }, { },
+ /* int combiner group 20 */
+ { 0, EXT_GIC_ID_MDMA_LCD0 },
+ /* int combiner group 21 */
+ { EXT_GIC_ID_PDMA0, EXT_GIC_ID_PDMA1 },
+ /* int combiner group 22 */
+ { EXT_GIC_ID_TIMER0, EXT_GIC_ID_TIMER1, EXT_GIC_ID_TIMER2,
+ EXT_GIC_ID_TIMER3, EXT_GIC_ID_TIMER4 },
+ /* int combiner group 23 */
+ { EXT_GIC_ID_RTC_ALARM, EXT_GIC_ID_RTC_TIC },
+ /* int combiner group 24 */
+ { EXT_GIC_ID_GPIO_XB, EXT_GIC_ID_GPIO_XA },
+ /* int combiner group 25 */
+ { EXT_GIC_ID_IEM_APC, EXT_GIC_ID_IEM_IEC },
+ /* int combiner group 26 */
+ { EXT_GIC_ID_UART0, EXT_GIC_ID_UART1, EXT_GIC_ID_UART2, EXT_GIC_ID_UART3,
+ EXT_GIC_ID_UART4 },
+ /* int combiner group 27 */
+ { EXT_GIC_ID_I2C0, EXT_GIC_ID_I2C1, EXT_GIC_ID_I2C2, EXT_GIC_ID_I2C3,
+ EXT_GIC_ID_I2C4, EXT_GIC_ID_I2C5, EXT_GIC_ID_I2C6,
+ EXT_GIC_ID_I2C7 },
+ /* int combiner group 28 */
+ { EXT_GIC_ID_SPI0, EXT_GIC_ID_SPI1, EXT_GIC_ID_SPI2 , EXT_GIC_ID_USB_HOST},
+ /* int combiner group 29 */
+ { EXT_GIC_ID_HSMMC0, EXT_GIC_ID_HSMMC1, EXT_GIC_ID_HSMMC2,
+ EXT_GIC_ID_HSMMC3, EXT_GIC_ID_SDMMC },
+ /* int combiner group 30 */
+ { EXT_GIC_ID_MIPI_CSI_4LANE, EXT_GIC_ID_MIPI_CSI_2LANE },
+ /* int combiner group 31 */
+ { EXT_GIC_ID_MIPI_DSI_4LANE, EXT_GIC_ID_MIPI_DSI_2LANE },
+ /* int combiner group 32 */
+ { EXT_GIC_ID_FIMC0, EXT_GIC_ID_FIMC1 },
+ /* int combiner group 33 */
+ { EXT_GIC_ID_FIMC2, EXT_GIC_ID_FIMC3 },
+ /* int combiner group 34 */
+ { EXT_GIC_ID_ONENAND_AUDI, EXT_GIC_ID_NFC },
+ /* int combiner group 35 */
+ { 0, 0, 0, EXT_GIC_ID_MCT_L1, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 },
+ /* int combiner group 36 */
+ { EXT_GIC_ID_MIXER },
+ /* int combiner group 37 */
+ { EXT_GIC_ID_EXTINT4, EXT_GIC_ID_EXTINT5, EXT_GIC_ID_EXTINT6,
+ EXT_GIC_ID_EXTINT7 },
+ /* groups 38-50 */
+ { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { },
+ /* int combiner group 51 */
+ { EXT_GIC_ID_MCT_L0, 0, 0, 0, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 },
+ /* group 52 */
+ { },
+ /* int combiner group 53 */
+ { EXT_GIC_ID_WDT, 0, 0, 0, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 },
+ /* groups 54-63 */
+ { }, { }, { }, { }, { }, { }, { }, { }, { }, { }
+};
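+
+/*
+ * Example: internal combiner group 22 bit 0 (TIMER0) maps to external GIC
+ * ID 69 (EXT_GIC_ID_TIMER0); exynos4210_init_board_irqs() below wires such
+ * an interrupt to s->ext_gic_irq[irq_id - 32].
+ */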
+
+#define EXYNOS4210_GIC_NIRQ 160
+
+#define EXYNOS4210_EXT_GIC_CPU_REGION_SIZE 0x10000
+#define EXYNOS4210_EXT_GIC_DIST_REGION_SIZE 0x10000
+
+#define EXYNOS4210_EXT_GIC_PER_CPU_OFFSET 0x8000
+#define EXYNOS4210_EXT_GIC_CPU_GET_OFFSET(n) \
+ ((n) * EXYNOS4210_EXT_GIC_PER_CPU_OFFSET)
+#define EXYNOS4210_EXT_GIC_DIST_GET_OFFSET(n) \
+ ((n) * EXYNOS4210_EXT_GIC_PER_CPU_OFFSET)
+
+#define EXYNOS4210_GIC_CPU_REGION_SIZE 0x100
+#define EXYNOS4210_GIC_DIST_REGION_SIZE 0x1000
+
+static void exynos4210_irq_handler(void *opaque, int irq, int level)
+{
+ Exynos4210Irq *s = (Exynos4210Irq *)opaque;
+
+ /* Bypass */
+ qemu_set_irq(s->board_irqs[irq], level);
+}
+
+/*
+ * Initialize exynos4210 IRQ subsystem stub.
+ */
+qemu_irq *exynos4210_init_irq(Exynos4210Irq *s)
+{
+ return qemu_allocate_irqs(exynos4210_irq_handler, s,
+ EXYNOS4210_MAX_INT_COMBINER_IN_IRQ);
+}
+
+/*
+ * Initialize board IRQs.
+ * These IRQs contain split internal/external combiner and external GIC IRQs.
+ */
+void exynos4210_init_board_irqs(Exynos4210Irq *s)
+{
+ uint32_t grp, bit, irq_id, n;
+
+ for (n = 0; n < EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ; n++) {
+ irq_id = 0;
+ if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 4) ||
+ n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4)) {
+ /* MCT_G0 is passed to External GIC */
+ irq_id = EXT_GIC_ID_MCT_G0;
+ }
+ if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 5) ||
+ n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 5)) {
+ /* MCT_G1 is passed to External and GIC */
+ irq_id = EXT_GIC_ID_MCT_G1;
+ }
+ if (irq_id) {
+ s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n],
+ s->ext_gic_irq[irq_id-32]);
+ } else {
+ s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n],
+ s->ext_combiner_irq[n]);
+ }
+ }
+ for (; n < EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; n++) {
+ /* these IDs are passed to Internal Combiner and External GIC */
+ grp = EXYNOS4210_COMBINER_GET_GRP_NUM(n);
+ bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n);
+ irq_id = combiner_grp_to_gic_id[grp -
+ EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][bit];
+
+ if (irq_id) {
+ s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n],
+ s->ext_gic_irq[irq_id-32]);
+ }
+ }
+}
+
+/*
+ * Get IRQ number from exynos4210 IRQ subsystem stub.
+ * To identify the IRQ source, use the internal combiner group and bit number:
+ * grp - group number
+ * bit - bit number inside group
+ */
+uint32_t exynos4210_get_irq(uint32_t grp, uint32_t bit)
+{
+ return EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit);
+}
+
+/********* GIC part *********/
+
+#define TYPE_EXYNOS4210_GIC "exynos4210.gic"
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210GicState, EXYNOS4210_GIC)
+
+struct Exynos4210GicState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion cpu_container;
+ MemoryRegion dist_container;
+ MemoryRegion cpu_alias[EXYNOS4210_NCPUS];
+ MemoryRegion dist_alias[EXYNOS4210_NCPUS];
+ uint32_t num_cpu;
+ DeviceState *gic;
+};
+
+static void exynos4210_gic_set_irq(void *opaque, int irq, int level)
+{
+ Exynos4210GicState *s = (Exynos4210GicState *)opaque;
+ qemu_set_irq(qdev_get_gpio_in(s->gic, irq), level);
+}
+
+static void exynos4210_gic_realize(DeviceState *dev, Error **errp)
+{
+ Object *obj = OBJECT(dev);
+ Exynos4210GicState *s = EXYNOS4210_GIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ const char cpu_prefix[] = "exynos4210-gic-alias_cpu";
+ const char dist_prefix[] = "exynos4210-gic-alias_dist";
+ char cpu_alias_name[sizeof(cpu_prefix) + 3];
+ char dist_alias_name[sizeof(cpu_prefix) + 3];
+ SysBusDevice *gicbusdev;
+ uint32_t n = s->num_cpu;
+ uint32_t i;
+
+ s->gic = qdev_new("arm_gic");
+ qdev_prop_set_uint32(s->gic, "num-cpu", s->num_cpu);
+ qdev_prop_set_uint32(s->gic, "num-irq", EXYNOS4210_GIC_NIRQ);
+ gicbusdev = SYS_BUS_DEVICE(s->gic);
+ sysbus_realize_and_unref(gicbusdev, &error_fatal);
+
+ /* Pass through outbound IRQ lines from the GIC */
+ sysbus_pass_irq(sbd, gicbusdev);
+
+ /* Pass through inbound GPIO lines to the GIC */
+ qdev_init_gpio_in(dev, exynos4210_gic_set_irq,
+ EXYNOS4210_GIC_NIRQ - 32);
+
+ memory_region_init(&s->cpu_container, obj, "exynos4210-cpu-container",
+ EXYNOS4210_EXT_GIC_CPU_REGION_SIZE);
+ memory_region_init(&s->dist_container, obj, "exynos4210-dist-container",
+ EXYNOS4210_EXT_GIC_DIST_REGION_SIZE);
+
+ /*
+     * This clues in gcc that our on-stack buffers do, in fact, have
+     * enough room for the cpu numbers.  gcc 9.2.1 on 32-bit x86
+     * doesn't figure this out otherwise and gives spurious warnings.
+ */
+ assert(n <= EXYNOS4210_NCPUS);
+ for (i = 0; i < n; i++) {
+ /* Map CPU interface per SMP Core */
+ sprintf(cpu_alias_name, "%s%x", cpu_prefix, i);
+ memory_region_init_alias(&s->cpu_alias[i], obj,
+ cpu_alias_name,
+ sysbus_mmio_get_region(gicbusdev, 1),
+ 0,
+ EXYNOS4210_GIC_CPU_REGION_SIZE);
+ memory_region_add_subregion(&s->cpu_container,
+ EXYNOS4210_EXT_GIC_CPU_GET_OFFSET(i), &s->cpu_alias[i]);
+
+ /* Map Distributor per SMP Core */
+ sprintf(dist_alias_name, "%s%x", dist_prefix, i);
+ memory_region_init_alias(&s->dist_alias[i], obj,
+ dist_alias_name,
+ sysbus_mmio_get_region(gicbusdev, 0),
+ 0,
+ EXYNOS4210_GIC_DIST_REGION_SIZE);
+ memory_region_add_subregion(&s->dist_container,
+ EXYNOS4210_EXT_GIC_DIST_GET_OFFSET(i), &s->dist_alias[i]);
+ }
+
+ sysbus_init_mmio(sbd, &s->cpu_container);
+ sysbus_init_mmio(sbd, &s->dist_container);
+}
+
+static Property exynos4210_gic_properties[] = {
+ DEFINE_PROP_UINT32("num-cpu", Exynos4210GicState, num_cpu, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void exynos4210_gic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, exynos4210_gic_properties);
+ dc->realize = exynos4210_gic_realize;
+}
+
+static const TypeInfo exynos4210_gic_info = {
+ .name = TYPE_EXYNOS4210_GIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Exynos4210GicState),
+ .class_init = exynos4210_gic_class_init,
+};
+
+static void exynos4210_gic_register_types(void)
+{
+ type_register_static(&exynos4210_gic_info);
+}
+
+type_init(exynos4210_gic_register_types)
+
+/* IRQ OR Gate struct.
+ *
+ * This device models an OR gate. There are n_in input qdev gpio lines and one
+ * output sysbus IRQ line. The output IRQ level is the logical OR of all
+ * the gpio inputs.
+ */
+
+#define TYPE_EXYNOS4210_IRQ_GATE "exynos4210.irq_gate"
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210IRQGateState, EXYNOS4210_IRQ_GATE)
+
+struct Exynos4210IRQGateState {
+ SysBusDevice parent_obj;
+
+    uint32_t n_in; /* number of inputs */
+ uint32_t *level; /* input levels */
+ qemu_irq out; /* output IRQ */
+};
+
+static Property exynos4210_irq_gate_properties[] = {
+ DEFINE_PROP_UINT32("n_in", Exynos4210IRQGateState, n_in, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vmstate_exynos4210_irq_gate = {
+ .name = "exynos4210.irq_gate",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, n_in),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* Process a change in IRQ input. */
+static void exynos4210_irq_gate_handler(void *opaque, int irq, int level)
+{
+ Exynos4210IRQGateState *s = (Exynos4210IRQGateState *)opaque;
+ uint32_t i;
+
+ assert(irq < s->n_in);
+
+ s->level[irq] = level;
+
+ for (i = 0; i < s->n_in; i++) {
+ if (s->level[i] >= 1) {
+ qemu_irq_raise(s->out);
+ return;
+ }
+ }
+
+ qemu_irq_lower(s->out);
+}
+
+static void exynos4210_irq_gate_reset(DeviceState *d)
+{
+ Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(d);
+
+ memset(s->level, 0, s->n_in * sizeof(*s->level));
+}
+
+/*
+ * IRQ Gate initialization.
+ */
+static void exynos4210_irq_gate_init(Object *obj)
+{
+ Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ sysbus_init_irq(sbd, &s->out);
+}
+
+static void exynos4210_irq_gate_realize(DeviceState *dev, Error **errp)
+{
+ Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(dev);
+
+ /* Allocate general purpose input signals and connect a handler to each of
+ * them */
+ qdev_init_gpio_in(dev, exynos4210_irq_gate_handler, s->n_in);
+
+ s->level = g_malloc0(s->n_in * sizeof(*s->level));
+}
+
+static void exynos4210_irq_gate_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = exynos4210_irq_gate_reset;
+ dc->vmsd = &vmstate_exynos4210_irq_gate;
+ device_class_set_props(dc, exynos4210_irq_gate_properties);
+ dc->realize = exynos4210_irq_gate_realize;
+}
+
+static const TypeInfo exynos4210_irq_gate_info = {
+ .name = TYPE_EXYNOS4210_IRQ_GATE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Exynos4210IRQGateState),
+ .instance_init = exynos4210_irq_gate_init,
+ .class_init = exynos4210_irq_gate_class_init,
+};
+
+static void exynos4210_irq_gate_register_types(void)
+{
+ type_register_static(&exynos4210_irq_gate_info);
+}
+
+type_init(exynos4210_irq_gate_register_types)
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
new file mode 100644
index 000000000..8d29b40ca
--- /dev/null
+++ b/hw/intc/gic_internal.h
@@ -0,0 +1,322 @@
+/*
+ * ARM GIC support - internal interfaces
+ *
+ * Copyright (c) 2012 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_ARM_GIC_INTERNAL_H
+#define QEMU_ARM_GIC_INTERNAL_H
+
+#include "hw/registerfields.h"
+#include "hw/intc/arm_gic.h"
+
+#define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1)))
+
+#define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm))
+#define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &= ~(cm))
+#define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
+#define GIC_DIST_SET_PENDING(irq, cm) (s->irq_state[irq].pending |= (cm))
+#define GIC_DIST_CLEAR_PENDING(irq, cm) (s->irq_state[irq].pending &= ~(cm))
+#define GIC_DIST_SET_ACTIVE(irq, cm) (s->irq_state[irq].active |= (cm))
+#define GIC_DIST_CLEAR_ACTIVE(irq, cm) (s->irq_state[irq].active &= ~(cm))
+#define GIC_DIST_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
+#define GIC_DIST_SET_MODEL(irq) (s->irq_state[irq].model = true)
+#define GIC_DIST_CLEAR_MODEL(irq) (s->irq_state[irq].model = false)
+#define GIC_DIST_TEST_MODEL(irq) (s->irq_state[irq].model)
+#define GIC_DIST_SET_LEVEL(irq, cm) (s->irq_state[irq].level |= (cm))
+#define GIC_DIST_CLEAR_LEVEL(irq, cm) (s->irq_state[irq].level &= ~(cm))
+#define GIC_DIST_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
+#define GIC_DIST_SET_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger = true)
+#define GIC_DIST_CLEAR_EDGE_TRIGGER(irq) \
+ (s->irq_state[irq].edge_trigger = false)
+#define GIC_DIST_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
+#define GIC_DIST_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \
+ s->priority1[irq][cpu] : \
+ s->priority2[(irq) - GIC_INTERNAL])
+#define GIC_DIST_TARGET(irq) (s->irq_target[irq])
+#define GIC_DIST_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm))
+#define GIC_DIST_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm))
+#define GIC_DIST_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0)
+
+#define GICD_CTLR_EN_GRP0 (1U << 0)
+#define GICD_CTLR_EN_GRP1 (1U << 1)
+
+#define GICC_CTLR_EN_GRP0 (1U << 0)
+#define GICC_CTLR_EN_GRP1 (1U << 1)
+#define GICC_CTLR_ACK_CTL (1U << 2)
+#define GICC_CTLR_FIQ_EN (1U << 3)
+#define GICC_CTLR_CBPR (1U << 4) /* GICv1: SBPR */
+#define GICC_CTLR_EOIMODE (1U << 9)
+#define GICC_CTLR_EOIMODE_NS (1U << 10)
+
+REG32(GICH_HCR, 0x0)
+ FIELD(GICH_HCR, EN, 0, 1)
+ FIELD(GICH_HCR, UIE, 1, 1)
+ FIELD(GICH_HCR, LRENPIE, 2, 1)
+ FIELD(GICH_HCR, NPIE, 3, 1)
+ FIELD(GICH_HCR, VGRP0EIE, 4, 1)
+ FIELD(GICH_HCR, VGRP0DIE, 5, 1)
+ FIELD(GICH_HCR, VGRP1EIE, 6, 1)
+ FIELD(GICH_HCR, VGRP1DIE, 7, 1)
+ FIELD(GICH_HCR, EOICount, 27, 5)
+
+#define GICH_HCR_MASK \
+ (R_GICH_HCR_EN_MASK | R_GICH_HCR_UIE_MASK | \
+ R_GICH_HCR_LRENPIE_MASK | R_GICH_HCR_NPIE_MASK | \
+ R_GICH_HCR_VGRP0EIE_MASK | R_GICH_HCR_VGRP0DIE_MASK | \
+ R_GICH_HCR_VGRP1EIE_MASK | R_GICH_HCR_VGRP1DIE_MASK | \
+ R_GICH_HCR_EOICount_MASK)
+
+REG32(GICH_VTR, 0x4)
+ FIELD(GICH_VTR, ListRegs, 0, 6)
+ FIELD(GICH_VTR, PREbits, 26, 3)
+ FIELD(GICH_VTR, PRIbits, 29, 3)
+
+REG32(GICH_VMCR, 0x8)
+ FIELD(GICH_VMCR, VMCCtlr, 0, 10)
+ FIELD(GICH_VMCR, VMABP, 18, 3)
+ FIELD(GICH_VMCR, VMBP, 21, 3)
+ FIELD(GICH_VMCR, VMPriMask, 27, 5)
+
+REG32(GICH_MISR, 0x10)
+ FIELD(GICH_MISR, EOI, 0, 1)
+ FIELD(GICH_MISR, U, 1, 1)
+ FIELD(GICH_MISR, LRENP, 2, 1)
+ FIELD(GICH_MISR, NP, 3, 1)
+ FIELD(GICH_MISR, VGrp0E, 4, 1)
+ FIELD(GICH_MISR, VGrp0D, 5, 1)
+ FIELD(GICH_MISR, VGrp1E, 6, 1)
+ FIELD(GICH_MISR, VGrp1D, 7, 1)
+
+REG32(GICH_EISR0, 0x20)
+REG32(GICH_EISR1, 0x24)
+REG32(GICH_ELRSR0, 0x30)
+REG32(GICH_ELRSR1, 0x34)
+REG32(GICH_APR, 0xf0)
+
+REG32(GICH_LR0, 0x100)
+ FIELD(GICH_LR0, VirtualID, 0, 10)
+ FIELD(GICH_LR0, PhysicalID, 10, 10)
+ FIELD(GICH_LR0, CPUID, 10, 3)
+ FIELD(GICH_LR0, EOI, 19, 1)
+ FIELD(GICH_LR0, Priority, 23, 5)
+ FIELD(GICH_LR0, State, 28, 2)
+ FIELD(GICH_LR0, Grp1, 30, 1)
+ FIELD(GICH_LR0, HW, 31, 1)
+
+/* Last LR register */
+REG32(GICH_LR63, 0x1fc)
+
+#define GICH_LR_MASK \
+ (R_GICH_LR0_VirtualID_MASK | R_GICH_LR0_PhysicalID_MASK | \
+ R_GICH_LR0_CPUID_MASK | R_GICH_LR0_EOI_MASK | \
+ R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \
+ R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK)
+
+#define GICH_LR_STATE_INVALID 0
+#define GICH_LR_STATE_PENDING 1
+#define GICH_LR_STATE_ACTIVE 2
+#define GICH_LR_STATE_ACTIVE_PENDING 3
+
+#define GICH_LR_VIRT_ID(entry) (FIELD_EX32(entry, GICH_LR0, VirtualID))
+#define GICH_LR_PHYS_ID(entry) (FIELD_EX32(entry, GICH_LR0, PhysicalID))
+#define GICH_LR_CPUID(entry) (FIELD_EX32(entry, GICH_LR0, CPUID))
+#define GICH_LR_EOI(entry) (FIELD_EX32(entry, GICH_LR0, EOI))
+#define GICH_LR_PRIORITY(entry) (FIELD_EX32(entry, GICH_LR0, Priority) << 3)
+#define GICH_LR_STATE(entry) (FIELD_EX32(entry, GICH_LR0, State))
+#define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1))
+#define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW))
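+
+/*
+ * The list register Priority field is only 5 bits wide and holds the upper
+ * bits of the 8-bit priority, hence the << 3 in GICH_LR_PRIORITY().
+ */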
+
+#define GICH_LR_CLEAR_PENDING(entry) \
+ ((entry) &= ~(GICH_LR_STATE_PENDING << R_GICH_LR0_State_SHIFT))
+#define GICH_LR_SET_ACTIVE(entry) \
+ ((entry) |= (GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
+#define GICH_LR_CLEAR_ACTIVE(entry) \
+ ((entry) &= ~(GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
+
+/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
+ * GICv2 and GICv2 with security extensions:
+ */
+#define GICC_CTLR_V1_MASK 0x1
+#define GICC_CTLR_V1_S_MASK 0x1f
+#define GICC_CTLR_V2_MASK 0x21f
+#define GICC_CTLR_V2_S_MASK 0x61f
+
+/* The special cases for the revision property: */
+#define REV_11MPCORE 0
+
+uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs);
+void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
+ MemTxAttrs attrs);
+
+static inline bool gic_test_pending(GICState *s, int irq, int cm)
+{
+ if (s->revision == REV_11MPCORE) {
+ return s->irq_state[irq].pending & cm;
+ } else {
+ /* Edge-triggered interrupts are marked pending on a rising edge, but
+ * level-triggered interrupts are either considered pending when the
+ * level is active or if software has explicitly written to
+ * GICD_ISPENDR to set the state pending.
+ */
+ return (s->irq_state[irq].pending & cm) ||
+ (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_LEVEL(irq, cm));
+ }
+}
+
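+/*
+ * CPU interface numbering convention used below: indices 0..GIC_NCPU-1 are
+ * the physical CPU interfaces and GIC_NCPU..(2 * GIC_NCPU - 1) are the
+ * corresponding virtual CPU interfaces.
+ */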
+static inline bool gic_is_vcpu(int cpu)
+{
+ return cpu >= GIC_NCPU;
+}
+
+static inline int gic_get_vcpu_real_id(int cpu)
+{
+ return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu;
+}
+
+/* Return true if the given vIRQ state exists in a LR and is either active or
+ * pending and active.
+ *
+ * This function is used to check that a guest's `end of interrupt' or
+ * `interrupts deactivation' request is valid, and matches with a LR of an
+ * already acknowledged vIRQ (i.e. has the active bit set in its state).
+ */
+static inline bool gic_virq_is_valid(GICState *s, int irq, int vcpu)
+{
+ int cpu = gic_get_vcpu_real_id(vcpu);
+ int lr_idx;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if ((GICH_LR_VIRT_ID(*entry) == irq) &&
+ (GICH_LR_STATE(*entry) & GICH_LR_STATE_ACTIVE)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Return a pointer to the LR entry matching the given vIRQ.
+ *
+ * This function is used to retrieve an LR for which we know for sure that the
+ * corresponding vIRQ exists in the current context (i.e. its current state is
+ * not `invalid'):
+ * - Either the corresponding vIRQ has been validated with gic_virq_is_valid()
+ * so it is `active' or `active and pending',
+ * - Or it was pending and has been selected by gic_get_best_virq(). It is now
+ * `pending', `active' or `active and pending', depending on what the guest
+ * already did with this vIRQ.
+ *
+ * Having multiple LRs with the same VirtualID leads to UNPREDICTABLE
+ * behaviour in the GIC. We choose to return the first one that matches.
+ */
+static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu)
+{
+ int cpu = gic_get_vcpu_real_id(vcpu);
+ int lr_idx;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if ((GICH_LR_VIRT_ID(*entry) == irq) &&
+ (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID)) {
+ return entry;
+ }
+ }
+
+ g_assert_not_reached();
+}
+
+static inline bool gic_test_group(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ return GICH_LR_GROUP(*entry);
+ } else {
+ return GIC_DIST_TEST_GROUP(irq, 1 << cpu);
+ }
+}
+
+static inline void gic_clear_pending(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_CLEAR_PENDING(*entry);
+ } else {
+ /* Clear pending state for both level and edge triggered
+ * interrupts. (level triggered interrupts with an active line
+ * remain pending, see gic_test_pending)
+ */
+ GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
+ : (1 << cpu));
+ }
+}
+
+static inline void gic_set_active(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_SET_ACTIVE(*entry);
+ } else {
+ GIC_DIST_SET_ACTIVE(irq, 1 << cpu);
+ }
+}
+
+static inline void gic_clear_active(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_CLEAR_ACTIVE(*entry);
+
+ if (GICH_LR_HW(*entry)) {
+ /* Hardware interrupt. We must forward the deactivation request to
+ * the distributor.
+ */
+ int phys_irq = GICH_LR_PHYS_ID(*entry);
+ int rcpu = gic_get_vcpu_real_id(cpu);
+
+ if (phys_irq < GIC_NR_SGIS || phys_irq >= GIC_MAXIRQ) {
+ /* UNPREDICTABLE behaviour, we choose to ignore the request */
+ return;
+ }
+
+ /* This is equivalent to a NS write to DIR on the physical CPU
+ * interface. Hence group0 interrupt deactivation is ignored if
+ * the GIC is secure.
+ */
+ if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) {
+ GIC_DIST_CLEAR_ACTIVE(phys_irq, 1 << rcpu);
+ }
+ }
+ } else {
+ GIC_DIST_CLEAR_ACTIVE(irq, 1 << cpu);
+ }
+}
+
+static inline int gic_get_priority(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ return GICH_LR_PRIORITY(*entry);
+ } else {
+ return GIC_DIST_GET_PRIORITY(irq, cpu);
+ }
+}
+
+#endif /* QEMU_ARM_GIC_INTERNAL_H */
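The vCPU helpers at the top of this header encode a virtual CPU interface as its real CPU index offset by GIC_NCPU. Below is a minimal standalone sketch of that convention; the GIC_NCPU value of 8 is an assumption chosen to match the GICv2 model's CPU limit, not something taken from this diff.

/* Standalone sketch of the vCPU index convention used in gic_internal.h:
 * virtual CPU interfaces are addressed as (real CPU id + GIC_NCPU). */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define GIC_NCPU 8   /* assumed value, matching the GICv2 model's CPU limit */

static bool gic_is_vcpu(int cpu)
{
    return cpu >= GIC_NCPU;
}

static int gic_get_vcpu_real_id(int cpu)
{
    return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu;
}

int main(void)
{
    int vcpu = 2 + GIC_NCPU;           /* vCPU interface of physical CPU 2 */

    assert(gic_is_vcpu(vcpu));
    assert(!gic_is_vcpu(2));
    assert(gic_get_vcpu_real_id(vcpu) == 2);

    printf("vcpu index %d maps back to real CPU %d\n",
           vcpu, gic_get_vcpu_real_id(vcpu));
    return 0;
}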
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
new file mode 100644
index 000000000..b9c37453b
--- /dev/null
+++ b/hw/intc/gicv3_internal.h
@@ -0,0 +1,611 @@
+/*
+ * ARM GICv3 support - internal interfaces
+ *
+ * Copyright (c) 2012 Linaro Limited
+ * Copyright (c) 2015 Huawei.
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Written by Peter Maydell
+ * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_ARM_GICV3_INTERNAL_H
+#define QEMU_ARM_GICV3_INTERNAL_H
+
+#include "hw/registerfields.h"
+#include "hw/intc/arm_gicv3_common.h"
+
+/* Distributor registers, as offsets from the distributor base address */
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_STATUSR 0x0010
+#define GICD_SETSPI_NSR 0x0040
+#define GICD_CLRSPI_NSR 0x0048
+#define GICD_SETSPI_SR 0x0050
+#define GICD_CLRSPI_SR 0x0058
+#define GICD_SEIR 0x0068
+#define GICD_IGROUPR 0x0080
+#define GICD_ISENABLER 0x0100
+#define GICD_ICENABLER 0x0180
+#define GICD_ISPENDR 0x0200
+#define GICD_ICPENDR 0x0280
+#define GICD_ISACTIVER 0x0300
+#define GICD_ICACTIVER 0x0380
+#define GICD_IPRIORITYR 0x0400
+#define GICD_ITARGETSR 0x0800
+#define GICD_ICFGR 0x0C00
+#define GICD_IGRPMODR 0x0D00
+#define GICD_NSACR 0x0E00
+#define GICD_SGIR 0x0F00
+#define GICD_CPENDSGIR 0x0F10
+#define GICD_SPENDSGIR 0x0F20
+#define GICD_IROUTER 0x6000
+#define GICD_IDREGS 0xFFD0
+
+/* GICD_CTLR fields */
+#define GICD_CTLR_EN_GRP0 (1U << 0)
+#define GICD_CTLR_EN_GRP1NS (1U << 1) /* GICv3 5.3.20 */
+#define GICD_CTLR_EN_GRP1S (1U << 2)
+#define GICD_CTLR_EN_GRP1_ALL (GICD_CTLR_EN_GRP1NS | GICD_CTLR_EN_GRP1S)
+/* Bit 4 is ARE if the system doesn't support TrustZone, ARE_S otherwise */
+#define GICD_CTLR_ARE (1U << 4)
+#define GICD_CTLR_ARE_S (1U << 4)
+#define GICD_CTLR_ARE_NS (1U << 5)
+#define GICD_CTLR_DS (1U << 6)
+#define GICD_CTLR_E1NWF (1U << 7)
+#define GICD_CTLR_RWP (1U << 31)
+
+#define GICD_TYPER_LPIS_SHIFT 17
+
+/* 16-bit EventId */
+#define GICD_TYPER_IDBITS 0xf
+
+/*
+ * Redistributor frame offsets from RD_base
+ */
+#define GICR_SGI_OFFSET 0x10000
+
+/*
+ * Redistributor registers, offsets from RD_base
+ */
+#define GICR_CTLR 0x0000
+#define GICR_IIDR 0x0004
+#define GICR_TYPER 0x0008
+#define GICR_STATUSR 0x0010
+#define GICR_WAKER 0x0014
+#define GICR_SETLPIR 0x0040
+#define GICR_CLRLPIR 0x0048
+#define GICR_PROPBASER 0x0070
+#define GICR_PENDBASER 0x0078
+#define GICR_INVLPIR 0x00A0
+#define GICR_INVALLR 0x00B0
+#define GICR_SYNCR 0x00C0
+#define GICR_IDREGS 0xFFD0
+
+/* SGI and PPI Redistributor registers, offsets from RD_base */
+#define GICR_IGROUPR0 (GICR_SGI_OFFSET + 0x0080)
+#define GICR_ISENABLER0 (GICR_SGI_OFFSET + 0x0100)
+#define GICR_ICENABLER0 (GICR_SGI_OFFSET + 0x0180)
+#define GICR_ISPENDR0 (GICR_SGI_OFFSET + 0x0200)
+#define GICR_ICPENDR0 (GICR_SGI_OFFSET + 0x0280)
+#define GICR_ISACTIVER0 (GICR_SGI_OFFSET + 0x0300)
+#define GICR_ICACTIVER0 (GICR_SGI_OFFSET + 0x0380)
+#define GICR_IPRIORITYR (GICR_SGI_OFFSET + 0x0400)
+#define GICR_ICFGR0 (GICR_SGI_OFFSET + 0x0C00)
+#define GICR_ICFGR1 (GICR_SGI_OFFSET + 0x0C04)
+#define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00)
+#define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00)
+
+#define GICR_CTLR_ENABLE_LPIS (1U << 0)
+#define GICR_CTLR_RWP (1U << 3)
+#define GICR_CTLR_DPG0 (1U << 24)
+#define GICR_CTLR_DPG1NS (1U << 25)
+#define GICR_CTLR_DPG1S (1U << 26)
+#define GICR_CTLR_UWP (1U << 31)
+
+#define GICR_TYPER_PLPIS (1U << 0)
+#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_DIRECTLPI (1U << 3)
+#define GICR_TYPER_LAST (1U << 4)
+#define GICR_TYPER_DPGS (1U << 5)
+#define GICR_TYPER_PROCNUM (0xFFFFU << 8)
+#define GICR_TYPER_COMMONLPIAFF (0x3 << 24)
+#define GICR_TYPER_AFFINITYVALUE (0xFFFFFFFFULL << 32)
+
+#define GICR_WAKER_ProcessorSleep (1U << 1)
+#define GICR_WAKER_ChildrenAsleep (1U << 2)
+
+FIELD(GICR_PROPBASER, IDBITS, 0, 5)
+FIELD(GICR_PROPBASER, INNERCACHE, 7, 3)
+FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2)
+FIELD(GICR_PROPBASER, PHYADDR, 12, 40)
+FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3)
+
+FIELD(GICR_PENDBASER, INNERCACHE, 7, 3)
+FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2)
+FIELD(GICR_PENDBASER, PHYADDR, 16, 36)
+FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3)
+FIELD(GICR_PENDBASER, PTZ, 62, 1)
+
+#define GICR_PROPBASER_IDBITS_THRESHOLD 0xd
+
+#define ICC_CTLR_EL1_CBPR (1U << 0)
+#define ICC_CTLR_EL1_EOIMODE (1U << 1)
+#define ICC_CTLR_EL1_PMHE (1U << 6)
+#define ICC_CTLR_EL1_PRIBITS_SHIFT 8
+#define ICC_CTLR_EL1_PRIBITS_MASK (7U << ICC_CTLR_EL1_PRIBITS_SHIFT)
+#define ICC_CTLR_EL1_IDBITS_SHIFT 11
+#define ICC_CTLR_EL1_SEIS (1U << 14)
+#define ICC_CTLR_EL1_A3V (1U << 15)
+
+#define ICC_PMR_PRIORITY_MASK 0xff
+#define ICC_BPR_BINARYPOINT_MASK 0x07
+#define ICC_IGRPEN_ENABLE 0x01
+
+#define ICC_CTLR_EL3_CBPR_EL1S (1U << 0)
+#define ICC_CTLR_EL3_CBPR_EL1NS (1U << 1)
+#define ICC_CTLR_EL3_EOIMODE_EL3 (1U << 2)
+#define ICC_CTLR_EL3_EOIMODE_EL1S (1U << 3)
+#define ICC_CTLR_EL3_EOIMODE_EL1NS (1U << 4)
+#define ICC_CTLR_EL3_RM (1U << 5)
+#define ICC_CTLR_EL3_PMHE (1U << 6)
+#define ICC_CTLR_EL3_PRIBITS_SHIFT 8
+#define ICC_CTLR_EL3_IDBITS_SHIFT 11
+#define ICC_CTLR_EL3_SEIS (1U << 14)
+#define ICC_CTLR_EL3_A3V (1U << 15)
+#define ICC_CTLR_EL3_NDS (1U << 17)
+
+#define ICH_VMCR_EL2_VENG0_SHIFT 0
+#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
+#define ICH_VMCR_EL2_VENG1_SHIFT 1
+#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT)
+#define ICH_VMCR_EL2_VACKCTL (1U << 2)
+#define ICH_VMCR_EL2_VFIQEN (1U << 3)
+#define ICH_VMCR_EL2_VCBPR_SHIFT 4
+#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT)
+#define ICH_VMCR_EL2_VEOIM_SHIFT 9
+#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT)
+#define ICH_VMCR_EL2_VBPR1_SHIFT 18
+#define ICH_VMCR_EL2_VBPR1_LENGTH 3
+#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT)
+#define ICH_VMCR_EL2_VBPR0_SHIFT 21
+#define ICH_VMCR_EL2_VBPR0_LENGTH 3
+#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT)
+#define ICH_VMCR_EL2_VPMR_SHIFT 24
+#define ICH_VMCR_EL2_VPMR_LENGTH 8
+#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT)
+
+#define ICH_HCR_EL2_EN (1U << 0)
+#define ICH_HCR_EL2_UIE (1U << 1)
+#define ICH_HCR_EL2_LRENPIE (1U << 2)
+#define ICH_HCR_EL2_NPIE (1U << 3)
+#define ICH_HCR_EL2_VGRP0EIE (1U << 4)
+#define ICH_HCR_EL2_VGRP0DIE (1U << 5)
+#define ICH_HCR_EL2_VGRP1EIE (1U << 6)
+#define ICH_HCR_EL2_VGRP1DIE (1U << 7)
+#define ICH_HCR_EL2_TC (1U << 10)
+#define ICH_HCR_EL2_TALL0 (1U << 11)
+#define ICH_HCR_EL2_TALL1 (1U << 12)
+#define ICH_HCR_EL2_TSEI (1U << 13)
+#define ICH_HCR_EL2_TDIR (1U << 14)
+#define ICH_HCR_EL2_EOICOUNT_SHIFT 27
+#define ICH_HCR_EL2_EOICOUNT_LENGTH 5
+#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT)
+
+#define ICH_LR_EL2_VINTID_SHIFT 0
+#define ICH_LR_EL2_VINTID_LENGTH 32
+#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT)
+#define ICH_LR_EL2_PINTID_SHIFT 32
+#define ICH_LR_EL2_PINTID_LENGTH 10
+#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT)
+/* Note that EOI overlaps with the top bit of the pINTID field */
+#define ICH_LR_EL2_EOI (1ULL << 41)
+#define ICH_LR_EL2_PRIORITY_SHIFT 48
+#define ICH_LR_EL2_PRIORITY_LENGTH 8
+#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
+#define ICH_LR_EL2_GROUP (1ULL << 60)
+#define ICH_LR_EL2_HW (1ULL << 61)
+#define ICH_LR_EL2_STATE_SHIFT 62
+#define ICH_LR_EL2_STATE_LENGTH 2
+#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT)
+/* values for the state field: */
+#define ICH_LR_EL2_STATE_INVALID 0
+#define ICH_LR_EL2_STATE_PENDING 1
+#define ICH_LR_EL2_STATE_ACTIVE 2
+#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3
+#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT)
+#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT)
+
+#define ICH_MISR_EL2_EOI (1U << 0)
+#define ICH_MISR_EL2_U (1U << 1)
+#define ICH_MISR_EL2_LRENP (1U << 2)
+#define ICH_MISR_EL2_NP (1U << 3)
+#define ICH_MISR_EL2_VGRP0E (1U << 4)
+#define ICH_MISR_EL2_VGRP0D (1U << 5)
+#define ICH_MISR_EL2_VGRP1E (1U << 6)
+#define ICH_MISR_EL2_VGRP1D (1U << 7)
+
+#define ICH_VTR_EL2_LISTREGS_SHIFT 0
+#define ICH_VTR_EL2_TDS (1U << 19)
+#define ICH_VTR_EL2_NV4 (1U << 20)
+#define ICH_VTR_EL2_A3V (1U << 21)
+#define ICH_VTR_EL2_SEIS (1U << 22)
+#define ICH_VTR_EL2_IDBITS_SHIFT 23
+#define ICH_VTR_EL2_PREBITS_SHIFT 26
+#define ICH_VTR_EL2_PRIBITS_SHIFT 29
+
+/* ITS Registers */
+
+FIELD(GITS_BASER, SIZE, 0, 8)
+FIELD(GITS_BASER, PAGESIZE, 8, 2)
+FIELD(GITS_BASER, SHAREABILITY, 10, 2)
+FIELD(GITS_BASER, PHYADDR, 12, 36)
+FIELD(GITS_BASER, PHYADDRL_64K, 16, 32)
+FIELD(GITS_BASER, PHYADDRH_64K, 12, 4)
+FIELD(GITS_BASER, ENTRYSIZE, 48, 5)
+FIELD(GITS_BASER, OUTERCACHE, 53, 3)
+FIELD(GITS_BASER, TYPE, 56, 3)
+FIELD(GITS_BASER, INNERCACHE, 59, 3)
+FIELD(GITS_BASER, INDIRECT, 62, 1)
+FIELD(GITS_BASER, VALID, 63, 1)
+
+FIELD(GITS_CBASER, SIZE, 0, 8)
+FIELD(GITS_CBASER, SHAREABILITY, 10, 2)
+FIELD(GITS_CBASER, PHYADDR, 12, 40)
+FIELD(GITS_CBASER, OUTERCACHE, 53, 3)
+FIELD(GITS_CBASER, INNERCACHE, 59, 3)
+FIELD(GITS_CBASER, VALID, 63, 1)
+
+FIELD(GITS_CREADR, STALLED, 0, 1)
+FIELD(GITS_CREADR, OFFSET, 5, 15)
+
+FIELD(GITS_CWRITER, RETRY, 0, 1)
+FIELD(GITS_CWRITER, OFFSET, 5, 15)
+
+FIELD(GITS_CTLR, ENABLED, 0, 1)
+FIELD(GITS_CTLR, QUIESCENT, 31, 1)
+
+FIELD(GITS_TYPER, PHYSICAL, 0, 1)
+FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
+FIELD(GITS_TYPER, IDBITS, 8, 5)
+FIELD(GITS_TYPER, DEVBITS, 13, 5)
+FIELD(GITS_TYPER, SEIS, 18, 1)
+FIELD(GITS_TYPER, PTA, 19, 1)
+FIELD(GITS_TYPER, CIDBITS, 32, 4)
+FIELD(GITS_TYPER, CIL, 36, 1)
+
+#define GITS_IDREGS 0xFFD0
+
+#define ITS_CTLR_ENABLED (1U) /* ITS Enabled */
+
+#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \
+ R_GITS_BASER_TYPE_MASK)
+
+#define GITS_BASER_PAGESIZE_4K 0
+#define GITS_BASER_PAGESIZE_16K 1
+#define GITS_BASER_PAGESIZE_64K 2
+
+#define GITS_BASER_TYPE_DEVICE 1ULL
+#define GITS_BASER_TYPE_COLLECTION 4ULL
+
+#define GITS_PAGE_SIZE_4K 0x1000
+#define GITS_PAGE_SIZE_16K 0x4000
+#define GITS_PAGE_SIZE_64K 0x10000
+
+#define L1TABLE_ENTRY_SIZE 8
+
+#define LPI_CTE_ENABLED TABLE_ENTRY_VALID_MASK
+#define LPI_PRIORITY_MASK 0xfc
+
+#define GITS_CMDQ_ENTRY_SIZE 32
+#define NUM_BYTES_IN_DW 8
+
+#define CMD_MASK 0xff
+
+/* ITS Commands */
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_DISCARD 0x0F
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPI 0x0B
+#define GITS_CMD_MAPTI 0x0A
+#define GITS_CMD_INV 0x0C
+#define GITS_CMD_INVALL 0x0D
+#define GITS_CMD_SYNC 0x05
+
+/* MAPC command fields */
+#define ICID_LENGTH 16
+#define ICID_MASK ((1U << ICID_LENGTH) - 1)
+FIELD(MAPC, RDBASE, 16, 32)
+
+#define RDBASE_PROCNUM_LENGTH 16
+#define RDBASE_PROCNUM_MASK ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)
+
+/* MAPD command fields */
+#define ITTADDR_LENGTH 44
+#define ITTADDR_SHIFT 8
+#define ITTADDR_MASK MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
+#define SIZE_MASK 0x1f
+
+/* MAPI command fields */
+#define EVENTID_MASK ((1ULL << 32) - 1)
+
+/* MAPTI command fields */
+#define pINTID_SHIFT 32
+#define pINTID_MASK MAKE_64BIT_MASK(32, 32)
+
+#define DEVID_SHIFT 32
+#define DEVID_MASK MAKE_64BIT_MASK(32, 32)
+
+#define VALID_SHIFT 63
+#define CMD_FIELD_VALID_MASK (1ULL << VALID_SHIFT)
+#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
+#define TABLE_ENTRY_VALID_MASK (1ULL << 0)
+
+/**
+ * Default features advertised by this version of ITS
+ */
+/* Physical LPIs supported */
+#define GITS_TYPE_PHYSICAL (1U << 0)
+
+/*
+ * 12-byte Interrupt Translation Table Entry size,
+ * as per Table 5.3 of the GICv3 spec
+ * ITE Lower 8 Bytes
+ * Bits: | 49 ... 26 | 25 ... 2 | 1 | 0 |
+ * Values: | 1023 | IntNum | IntType | Valid |
+ * ITE Higher 4 Bytes
+ * Bits: | 31 ... 16 | 15 ...0 |
+ * Values: | vPEID | ICID |
+ */
+#define ITS_ITT_ENTRY_SIZE 0xC
+#define ITE_ENTRY_INTTYPE_SHIFT 1
+#define ITE_ENTRY_INTID_SHIFT 2
+#define ITE_ENTRY_INTID_MASK MAKE_64BIT_MASK(2, 24)
+#define ITE_ENTRY_INTSP_SHIFT 26
+#define ITE_ENTRY_ICID_MASK MAKE_64BIT_MASK(0, 16)
+
+/* 16-bit EventId */
+#define ITS_IDBITS GICD_TYPER_IDBITS
+
+/* 16-bit DeviceId */
+#define ITS_DEVBITS 0xF
+
+/* 16-bit CollectionId */
+#define ITS_CIDBITS 0xF
+
+/*
+ * 8-byte Device Table Entry size
+ * Valid = 1 bit, ITTAddr = 44 bits, Size = 5 bits
+ */
+#define GITS_DTE_SIZE (0x8ULL)
+#define GITS_DTE_ITTADDR_SHIFT 6
+#define GITS_DTE_ITTADDR_MASK MAKE_64BIT_MASK(GITS_DTE_ITTADDR_SHIFT, \
+ ITTADDR_LENGTH)
+
+/*
+ * 8-byte Collection Table Entry size
+ * Valid = 1 bit, RDBase = 36 bits (considering max RDBASE)
+ */
+#define GITS_CTE_SIZE (0x8ULL)
+#define GITS_CTE_RDBASE_PROCNUM_MASK MAKE_64BIT_MASK(1, RDBASE_PROCNUM_LENGTH)
+
+/* Special interrupt IDs */
+#define INTID_SECURE 1020
+#define INTID_NONSECURE 1021
+#define INTID_SPURIOUS 1023
+
+/* Functions internal to the emulated GICv3 */
+
+/**
+ * gicv3_intid_is_special:
+ * @intid: interrupt ID
+ *
+ * Return true if @intid is a special interrupt ID (1020 to
+ * 1023 inclusive). This corresponds to the GIC spec pseudocode
+ * IsSpecial() function.
+ */
+static inline bool gicv3_intid_is_special(int intid)
+{
+ return intid >= INTID_SECURE && intid <= INTID_SPURIOUS;
+}
+
+/**
+ * gicv3_redist_update:
+ * @cs: GICv3CPUState for this redistributor
+ *
+ * Recalculate the highest priority pending interrupt after a
+ * change to redistributor state, and inform the CPU accordingly.
+ */
+void gicv3_redist_update(GICv3CPUState *cs);
+
+/**
+ * gicv3_update:
+ * @s: GICv3State
+ * @start: first interrupt whose state changed
+ * @len: length of the range of interrupts whose state changed
+ *
+ * Recalculate the highest priority pending interrupts after a
+ * change to the distributor state affecting @len interrupts
+ * starting at @start, and inform the CPUs accordingly.
+ */
+void gicv3_update(GICv3State *s, int start, int len);
+
+/**
+ * gicv3_full_update_noirqset:
+ * @s: GICv3State
+ *
+ * Recalculate the cached information about highest priority
+ * pending interrupts, but don't inform the CPUs. This should be
+ * called after an incoming migration has loaded new state.
+ */
+void gicv3_full_update_noirqset(GICv3State *s);
+
+/**
+ * gicv3_full_update:
+ * @s: GICv3State
+ *
+ * Recalculate the highest priority pending interrupts after
+ * a change that could affect the status of all interrupts,
+ * and inform the CPUs accordingly.
+ */
+void gicv3_full_update(GICv3State *s);
+MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data,
+ unsigned size, MemTxAttrs attrs);
+MemTxResult gicv3_dist_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned size, MemTxAttrs attrs);
+MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
+ unsigned size, MemTxAttrs attrs);
+MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size, MemTxAttrs attrs);
+void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
+void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
+void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);
+void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);
+/**
+ * gicv3_redist_update_lpi:
+ * @cs: GICv3CPUState
+ *
+ * Scan the LPI pending table and recalculate the highest priority
+ * pending LPI and also the overall highest priority pending interrupt.
+ */
+void gicv3_redist_update_lpi(GICv3CPUState *cs);
+/**
+ * gicv3_redist_update_lpi_only:
+ * @cs: GICv3CPUState
+ *
+ * Scan the LPI pending table and recalculate cs->hpplpi only,
+ * without calling gicv3_redist_update() to recalculate the overall
+ * highest priority pending interrupt. This should be called after
+ * an incoming migration has loaded new state.
+ */
+void gicv3_redist_update_lpi_only(GICv3CPUState *cs);
+void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
+void gicv3_init_cpuif(GICv3State *s);
+
+/**
+ * gicv3_cpuif_update:
+ * @cs: GICv3CPUState for the CPU to update
+ *
+ * Recalculate whether to assert the IRQ or FIQ lines after a change
+ * to the current highest priority pending interrupt, the CPU's
+ * current running priority or the CPU's current exception level or
+ * security state.
+ */
+void gicv3_cpuif_update(GICv3CPUState *cs);
+
+static inline uint32_t gicv3_iidr(void)
+{
+ /* Return the Implementer Identification Register value
+ * for the emulated GICv3, as reported in GICD_IIDR and GICR_IIDR.
+ *
+ * We claim to be an ARM r0p0 with a zero ProductID.
+ * This is the same as an r0p0 GIC-500.
+ */
+ return 0x43b;
+}
+
+static inline uint32_t gicv3_idreg(int regoffset)
+{
+ /* Return the value of the CoreSight ID register at the specified
+ * offset from the first ID register (as found in the distributor
+ * and redistributor register banks).
+ * These values indicate an ARM implementation of a GICv3.
+ */
+ static const uint8_t gicd_ids[] = {
+ 0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x3B, 0x00, 0x0D, 0xF0, 0x05, 0xB1
+ };
+ return gicd_ids[regoffset / 4];
+}
+
+/**
+ * gicv3_irq_group:
+ *
+ * Return the group which this interrupt is configured as (GICV3_G0,
+ * GICV3_G1 or GICV3_G1NS).
+ */
+static inline int gicv3_irq_group(GICv3State *s, GICv3CPUState *cs, int irq)
+{
+ bool grpbit, grpmodbit;
+
+ if (irq < GIC_INTERNAL) {
+ grpbit = extract32(cs->gicr_igroupr0, irq, 1);
+ grpmodbit = extract32(cs->gicr_igrpmodr0, irq, 1);
+ } else {
+ grpbit = gicv3_gicd_group_test(s, irq);
+ grpmodbit = gicv3_gicd_grpmod_test(s, irq);
+ }
+ if (grpbit) {
+ return GICV3_G1NS;
+ }
+ if (s->gicd_ctlr & GICD_CTLR_DS) {
+ return GICV3_G0;
+ }
+ return grpmodbit ? GICV3_G1 : GICV3_G0;
+}
+
+/**
+ * gicv3_redist_affid:
+ *
+ * Return the 32-bit affinity ID of the CPU connected to this redistributor
+ */
+static inline uint32_t gicv3_redist_affid(GICv3CPUState *cs)
+{
+ return cs->gicr_typer >> 32;
+}
+
+/**
+ * gicv3_cache_target_cpustate:
+ *
+ * Update the cached CPU state corresponding to the target for this interrupt
+ * (which is kept in s->gicd_irouter_target[]).
+ */
+static inline void gicv3_cache_target_cpustate(GICv3State *s, int irq)
+{
+ GICv3CPUState *cs = NULL;
+ int i;
+ uint32_t tgtaff = extract64(s->gicd_irouter[irq], 0, 24) |
+ extract64(s->gicd_irouter[irq], 32, 8) << 24;
+
+ for (i = 0; i < s->num_cpu; i++) {
+ if (s->cpu[i].gicr_typer >> 32 == tgtaff) {
+ cs = &s->cpu[i];
+ break;
+ }
+ }
+
+ s->gicd_irouter_target[irq] = cs;
+}
+
+/**
+ * gicv3_cache_all_target_cpustates:
+ *
+ * Populate the entire cache of CPU state pointers for interrupt targets
+ * (e.g. after inbound migration or CPU reset)
+ */
+static inline void gicv3_cache_all_target_cpustates(GICv3State *s)
+{
+ int irq;
+
+ for (irq = GIC_INTERNAL; irq < GICV3_MAXIRQ; irq++) {
+ gicv3_cache_target_cpustate(s, irq);
+ }
+}
+
+void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s);
+
+#endif /* QEMU_ARM_GICV3_INTERNAL_H */
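gicv3_cache_target_cpustate() above rebuilds a flat 32-bit affinity ID from a GICD_IROUTER value (Aff2.Aff1.Aff0 in bits [23:0], Aff3 in bits [39:32]) and compares it against each CPU's gicr_typer >> 32. The standalone sketch below mirrors that composition; extract_bits() and the example route value are local stand-ins for QEMU's extract64() and real register contents, not part of the QEMU code.

/* Standalone sketch of how a GICD_IROUTER value is folded into the 32-bit
 * affinity ID used to find the target redistributor. */
#include <stdint.h>
#include <stdio.h>

static uint64_t extract_bits(uint64_t value, int start, int length)
{
    return (value >> start) & ((1ULL << length) - 1);
}

static uint32_t irouter_to_affid(uint64_t irouter)
{
    /* Aff2.Aff1.Aff0 occupy bits [23:0]; Aff3 occupies bits [39:32]. */
    return extract_bits(irouter, 0, 24) | (extract_bits(irouter, 32, 8) << 24);
}

int main(void)
{
    /* Hypothetical route: Aff3=0x01, Aff2=0x00, Aff1=0x02, Aff0=0x03 */
    uint64_t irouter = (0x01ULL << 32) | (0x02 << 8) | 0x03;
    uint32_t affid = irouter_to_affid(irouter);

    /* This is the value compared against (gicr_typer >> 32) for each CPU. */
    printf("target affinity id = 0x%08x\n", affid);
    return 0;
}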
diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c
new file mode 100644
index 000000000..dfd53275f
--- /dev/null
+++ b/hw/intc/goldfish_pic.c
@@ -0,0 +1,219 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Goldfish PIC
+ *
+ * (c) 2020 Laurent Vivier <laurent@vivier.eu>
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/intc/intc.h"
+#include "hw/intc/goldfish_pic.h"
+
+/* registers */
+
+enum {
+ REG_STATUS = 0x00,
+ REG_IRQ_PENDING = 0x04,
+ REG_IRQ_DISABLE_ALL = 0x08,
+ REG_DISABLE = 0x0c,
+ REG_ENABLE = 0x10,
+};
+
+static bool goldfish_pic_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts,
+ unsigned int *nb_irqs)
+{
+ GoldfishPICState *s = GOLDFISH_PIC(obj);
+
+ *irq_counts = s->stats_irq_count;
+ *nb_irqs = ARRAY_SIZE(s->stats_irq_count);
+ return true;
+}
+
+static void goldfish_pic_print_info(InterruptStatsProvider *obj, Monitor *mon)
+{
+ GoldfishPICState *s = GOLDFISH_PIC(obj);
+ monitor_printf(mon, "goldfish-pic.%d: pending=0x%08x enabled=0x%08x\n",
+ s->idx, s->pending, s->enabled);
+}
+
+static void goldfish_pic_update(GoldfishPICState *s)
+{
+ if (s->pending & s->enabled) {
+ qemu_irq_raise(s->irq);
+ } else {
+ qemu_irq_lower(s->irq);
+ }
+}
+
+static void goldfish_irq_request(void *opaque, int irq, int level)
+{
+ GoldfishPICState *s = opaque;
+
+ trace_goldfish_irq_request(s, s->idx, irq, level);
+
+ if (level) {
+ s->pending |= 1 << irq;
+ s->stats_irq_count[irq]++;
+ } else {
+ s->pending &= ~(1 << irq);
+ }
+ goldfish_pic_update(s);
+}
+
+static uint64_t goldfish_pic_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ GoldfishPICState *s = opaque;
+ uint64_t value = 0;
+
+ switch (addr) {
+ case REG_STATUS:
+ /* The number of pending interrupts (0 to 32) */
+ value = ctpop32(s->pending & s->enabled);
+ break;
+ case REG_IRQ_PENDING:
+ /* The pending interrupt mask */
+ value = s->pending & s->enabled;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%s: unimplemented register read 0x%02"HWADDR_PRIx"\n",
+ __func__, addr);
+ break;
+ }
+
+ trace_goldfish_pic_read(s, s->idx, addr, size, value);
+
+ return value;
+}
+
+static void goldfish_pic_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ GoldfishPICState *s = opaque;
+
+ trace_goldfish_pic_write(s, s->idx, addr, size, value);
+
+ switch (addr) {
+ case REG_IRQ_DISABLE_ALL:
+ s->enabled = 0;
+ s->pending = 0;
+ break;
+ case REG_DISABLE:
+ s->enabled &= ~value;
+ break;
+ case REG_ENABLE:
+ s->enabled |= value;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%s: unimplemented register write 0x%02"HWADDR_PRIx"\n",
+ __func__, addr);
+ break;
+ }
+ goldfish_pic_update(s);
+}
+
+static const MemoryRegionOps goldfish_pic_ops = {
+ .read = goldfish_pic_read,
+ .write = goldfish_pic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+};
+
+static void goldfish_pic_reset(DeviceState *dev)
+{
+ GoldfishPICState *s = GOLDFISH_PIC(dev);
+ int i;
+
+ trace_goldfish_pic_reset(s, s->idx);
+ s->pending = 0;
+ s->enabled = 0;
+
+ for (i = 0; i < ARRAY_SIZE(s->stats_irq_count); i++) {
+ s->stats_irq_count[i] = 0;
+ }
+}
+
+static void goldfish_pic_realize(DeviceState *dev, Error **errp)
+{
+ GoldfishPICState *s = GOLDFISH_PIC(dev);
+
+ trace_goldfish_pic_realize(s, s->idx);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &goldfish_pic_ops, s,
+ "goldfish_pic", 0x24);
+}
+
+static const VMStateDescription vmstate_goldfish_pic = {
+ .name = "goldfish_pic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(pending, GoldfishPICState),
+ VMSTATE_UINT32(enabled, GoldfishPICState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void goldfish_pic_instance_init(Object *obj)
+{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
+ GoldfishPICState *s = GOLDFISH_PIC(obj);
+
+ trace_goldfish_pic_instance_init(s);
+
+ sysbus_init_mmio(dev, &s->iomem);
+ sysbus_init_irq(dev, &s->irq);
+
+ qdev_init_gpio_in(DEVICE(obj), goldfish_irq_request, GOLDFISH_PIC_IRQ_NB);
+}
+
+static Property goldfish_pic_properties[] = {
+ DEFINE_PROP_UINT8("index", GoldfishPICState, idx, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void goldfish_pic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc);
+
+ dc->reset = goldfish_pic_reset;
+ dc->realize = goldfish_pic_realize;
+ dc->vmsd = &vmstate_goldfish_pic;
+ ic->get_statistics = goldfish_pic_get_statistics;
+ ic->print_info = goldfish_pic_print_info;
+ device_class_set_props(dc, goldfish_pic_properties);
+}
+
+static const TypeInfo goldfish_pic_info = {
+ .name = TYPE_GOLDFISH_PIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .class_init = goldfish_pic_class_init,
+ .instance_init = goldfish_pic_instance_init,
+ .instance_size = sizeof(GoldfishPICState),
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
+};
+
+static void goldfish_pic_register_types(void)
+{
+ type_register_static(&goldfish_pic_info);
+}
+
+type_init(goldfish_pic_register_types)
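For reference, a minimal standalone sketch of the register protocol implemented by goldfish_pic_read()/goldfish_pic_write() above: masks written to REG_ENABLE/REG_DISABLE gate the pending lines, REG_IRQ_PENDING returns the enabled-and-pending mask, and REG_STATUS returns its population count. The pic_mmio_* helpers and __builtin_popcount() (standing in for QEMU's ctpop32()) are local to the sketch.

/* Standalone model of the Goldfish PIC register interface. */
#include <stdint.h>
#include <stdio.h>

enum { REG_STATUS = 0x00, REG_IRQ_PENDING = 0x04,
       REG_IRQ_DISABLE_ALL = 0x08, REG_DISABLE = 0x0c, REG_ENABLE = 0x10 };

static uint32_t pending, enabled;

static void pic_mmio_write(uint32_t reg, uint32_t val)
{
    switch (reg) {
    case REG_IRQ_DISABLE_ALL: enabled = 0; pending = 0; break;
    case REG_DISABLE:         enabled &= ~val;          break;
    case REG_ENABLE:          enabled |= val;           break;
    }
}

static uint32_t pic_mmio_read(uint32_t reg)
{
    switch (reg) {
    case REG_STATUS:      return __builtin_popcount(pending & enabled);
    case REG_IRQ_PENDING: return pending & enabled;
    }
    return 0;
}

int main(void)
{
    pending = (1u << 3) | (1u << 5);      /* lines 3 and 5 asserted */
    pic_mmio_write(REG_ENABLE, 1u << 3);  /* only line 3 unmasked   */

    printf("status=%u pending=0x%08x\n",
           pic_mmio_read(REG_STATUS), pic_mmio_read(REG_IRQ_PENDING));
    return 0;
}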
diff --git a/hw/intc/grlib_irqmp.c b/hw/intc/grlib_irqmp.c
new file mode 100644
index 000000000..3bfe2544b
--- /dev/null
+++ b/hw/intc/grlib_irqmp.c
@@ -0,0 +1,362 @@
+/*
+ * QEMU GRLIB IRQMP Emulator
+ *
+ * (Multiprocessor and extended interrupts not supported)
+ *
+ * Copyright (c) 2010-2019 AdaCore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+
+#include "hw/qdev-properties.h"
+#include "hw/sparc/grlib.h"
+
+#include "trace.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+
+#define IRQMP_MAX_CPU 16
+#define IRQMP_REG_SIZE 256 /* Size of memory mapped registers */
+
+/* Memory mapped register offsets */
+#define LEVEL_OFFSET 0x00
+#define PENDING_OFFSET 0x04
+#define FORCE0_OFFSET 0x08
+#define CLEAR_OFFSET 0x0C
+#define MP_STATUS_OFFSET 0x10
+#define BROADCAST_OFFSET 0x14
+#define MASK_OFFSET 0x40
+#define FORCE_OFFSET 0x80
+#define EXTENDED_OFFSET 0xC0
+
+#define MAX_PILS 16
+
+OBJECT_DECLARE_SIMPLE_TYPE(IRQMP, GRLIB_IRQMP)
+
+typedef struct IRQMPState IRQMPState;
+
+struct IRQMP {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+
+ IRQMPState *state;
+ qemu_irq irq;
+};
+
+struct IRQMPState {
+ uint32_t level;
+ uint32_t pending;
+ uint32_t clear;
+ uint32_t broadcast;
+
+ uint32_t mask[IRQMP_MAX_CPU];
+ uint32_t force[IRQMP_MAX_CPU];
+ uint32_t extended[IRQMP_MAX_CPU];
+
+ IRQMP *parent;
+};
+
+static void grlib_irqmp_check_irqs(IRQMPState *state)
+{
+ uint32_t pend = 0;
+ uint32_t level0 = 0;
+ uint32_t level1 = 0;
+
+ assert(state != NULL);
+ assert(state->parent != NULL);
+
+ /* IRQ for CPU 0 (no SMP support) */
+ pend = (state->pending | state->force[0])
+ & state->mask[0];
+
+ level0 = pend & ~state->level;
+ level1 = pend & state->level;
+
+ trace_grlib_irqmp_check_irqs(state->pending, state->force[0],
+ state->mask[0], level1, level0);
+
+ /* Trigger level1 interrupt first and level0 if there is no level1 */
+ qemu_set_irq(state->parent->irq, level1 ?: level0);
+}
+
+static void grlib_irqmp_ack_mask(IRQMPState *state, uint32_t mask)
+{
+ /* Clear registers */
+ state->pending &= ~mask;
+ state->force[0] &= ~mask; /* Only CPU 0 (No SMP support) */
+
+ grlib_irqmp_check_irqs(state);
+}
+
+void grlib_irqmp_ack(DeviceState *dev, int intno)
+{
+ IRQMP *irqmp = GRLIB_IRQMP(dev);
+ IRQMPState *state;
+ uint32_t mask;
+
+ state = irqmp->state;
+ assert(state != NULL);
+
+ intno &= 15;
+ mask = 1 << intno;
+
+ trace_grlib_irqmp_ack(intno);
+
+ grlib_irqmp_ack_mask(state, mask);
+}
+
+static void grlib_irqmp_set_irq(void *opaque, int irq, int level)
+{
+ IRQMP *irqmp = GRLIB_IRQMP(opaque);
+ IRQMPState *s;
+ int i = 0;
+
+ s = irqmp->state;
+ assert(s != NULL);
+ assert(s->parent != NULL);
+
+
+ if (level) {
+ trace_grlib_irqmp_set_irq(irq);
+
+ if (s->broadcast & 1 << irq) {
+ /* Broadcasted IRQ */
+ for (i = 0; i < IRQMP_MAX_CPU; i++) {
+ s->force[i] |= 1 << irq;
+ }
+ } else {
+ s->pending |= 1 << irq;
+ }
+ grlib_irqmp_check_irqs(s);
+
+ }
+}
+
+static uint64_t grlib_irqmp_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ IRQMP *irqmp = opaque;
+ IRQMPState *state;
+
+ assert(irqmp != NULL);
+ state = irqmp->state;
+ assert(state != NULL);
+
+ addr &= 0xff;
+
+ /* global registers */
+ switch (addr) {
+ case LEVEL_OFFSET:
+ return state->level;
+
+ case PENDING_OFFSET:
+ return state->pending;
+
+ case FORCE0_OFFSET:
+ /* This register is an "alias" for the force register of CPU 0 */
+ return state->force[0];
+
+ case CLEAR_OFFSET:
+ case MP_STATUS_OFFSET:
+ /* Always read as 0 */
+ return 0;
+
+ case BROADCAST_OFFSET:
+ return state->broadcast;
+
+ default:
+ break;
+ }
+
+ /* mask registers */
+ if (addr >= MASK_OFFSET && addr < FORCE_OFFSET) {
+ int cpu = (addr - MASK_OFFSET) / 4;
+ assert(cpu >= 0 && cpu < IRQMP_MAX_CPU);
+
+ return state->mask[cpu];
+ }
+
+ /* force registers */
+ if (addr >= FORCE_OFFSET && addr < EXTENDED_OFFSET) {
+ int cpu = (addr - FORCE_OFFSET) / 4;
+ assert(cpu >= 0 && cpu < IRQMP_MAX_CPU);
+
+ return state->force[cpu];
+ }
+
+ /* extended (not supported) */
+ if (addr >= EXTENDED_OFFSET && addr < IRQMP_REG_SIZE) {
+ int cpu = (addr - EXTENDED_OFFSET) / 4;
+ assert(cpu >= 0 && cpu < IRQMP_MAX_CPU);
+
+ return state->extended[cpu];
+ }
+
+ trace_grlib_irqmp_readl_unknown(addr);
+ return 0;
+}
+
+static void grlib_irqmp_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ IRQMP *irqmp = opaque;
+ IRQMPState *state;
+
+ assert(irqmp != NULL);
+ state = irqmp->state;
+ assert(state != NULL);
+
+ addr &= 0xff;
+
+ /* global registers */
+ switch (addr) {
+ case LEVEL_OFFSET:
+ value &= 0xFFFF << 1; /* clean up the value */
+ state->level = value;
+ return;
+
+ case PENDING_OFFSET:
+ /* Read Only */
+ return;
+
+ case FORCE0_OFFSET:
+ /* This register is an "alias" for the force register of CPU 0 */
+
+ value &= 0xFFFE; /* clean up the value */
+ state->force[0] = value;
+ grlib_irqmp_check_irqs(irqmp->state);
+ return;
+
+ case CLEAR_OFFSET:
+ value &= ~1; /* clean up the value */
+ grlib_irqmp_ack_mask(state, value);
+ return;
+
+ case MP_STATUS_OFFSET:
+ /* Read Only (no SMP support) */
+ return;
+
+ case BROADCAST_OFFSET:
+ value &= 0xFFFE; /* clean up the value */
+ state->broadcast = value;
+ return;
+
+ default:
+ break;
+ }
+
+ /* mask registers */
+ if (addr >= MASK_OFFSET && addr < FORCE_OFFSET) {
+ int cpu = (addr - MASK_OFFSET) / 4;
+ assert(cpu >= 0 && cpu < IRQMP_MAX_CPU);
+
+ value &= ~1; /* clean up the value */
+ state->mask[cpu] = value;
+ grlib_irqmp_check_irqs(irqmp->state);
+ return;
+ }
+
+ /* force registers */
+ if (addr >= FORCE_OFFSET && addr < EXTENDED_OFFSET) {
+ int cpu = (addr - FORCE_OFFSET) / 4;
+ assert(cpu >= 0 && cpu < IRQMP_MAX_CPU);
+
+ uint32_t force = value & 0xFFFE;
+ uint32_t clear = (value >> 16) & 0xFFFE;
+ uint32_t old = state->force[cpu];
+
+ state->force[cpu] = (old | force) & ~clear;
+ grlib_irqmp_check_irqs(irqmp->state);
+ return;
+ }
+
+ /* extended (not supported) */
+ if (addr >= EXTENDED_OFFSET && addr < IRQMP_REG_SIZE) {
+ int cpu = (addr - EXTENDED_OFFSET) / 4;
+ assert(cpu >= 0 && cpu < IRQMP_MAX_CPU);
+
+ value &= 0xF; /* clean up the value */
+ state->extended[cpu] = value;
+ return;
+ }
+
+ trace_grlib_irqmp_writel_unknown(addr, value);
+}
+
+static const MemoryRegionOps grlib_irqmp_ops = {
+ .read = grlib_irqmp_read,
+ .write = grlib_irqmp_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void grlib_irqmp_reset(DeviceState *d)
+{
+ IRQMP *irqmp = GRLIB_IRQMP(d);
+ assert(irqmp->state != NULL);
+
+ memset(irqmp->state, 0, sizeof *irqmp->state);
+ irqmp->state->parent = irqmp;
+}
+
+static void grlib_irqmp_init(Object *obj)
+{
+ IRQMP *irqmp = GRLIB_IRQMP(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
+
+ qdev_init_gpio_in(DEVICE(obj), grlib_irqmp_set_irq, MAX_PILS);
+ qdev_init_gpio_out_named(DEVICE(obj), &irqmp->irq, "grlib-irq", 1);
+ memory_region_init_io(&irqmp->iomem, obj, &grlib_irqmp_ops, irqmp,
+ "irqmp", IRQMP_REG_SIZE);
+
+ irqmp->state = g_malloc0(sizeof *irqmp->state);
+
+ sysbus_init_mmio(dev, &irqmp->iomem);
+}
+
+static void grlib_irqmp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = grlib_irqmp_reset;
+}
+
+static const TypeInfo grlib_irqmp_info = {
+ .name = TYPE_GRLIB_IRQMP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IRQMP),
+ .instance_init = grlib_irqmp_init,
+ .class_init = grlib_irqmp_class_init,
+};
+
+static void grlib_irqmp_register_types(void)
+{
+ type_register_static(&grlib_irqmp_info);
+}
+
+type_init(grlib_irqmp_register_types)
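The per-CPU force register write decoding in grlib_irqmp_write() above packs a set mask in bits [15:1] and a clear mask in bits [31:17] of the same word, with bit 0 always ignored. A small standalone sketch of just that decoding (the example values are illustrative only):

/* Standalone sketch of the IRQMP per-CPU force register write semantics. */
#include <stdint.h>
#include <stdio.h>

static uint32_t force_write(uint32_t old, uint32_t value)
{
    uint32_t force = value & 0xFFFE;          /* set mask, bit 0 ignored   */
    uint32_t clear = (value >> 16) & 0xFFFE;  /* clear mask, bit 0 ignored */

    return (old | force) & ~clear;
}

int main(void)
{
    uint32_t reg = 1u << 4;                       /* IRQ 4 already forced */
    uint32_t val = (1u << (16 + 4)) | (1u << 7);  /* clear IRQ 4, force 7 */

    printf("force register: 0x%04x\n", force_write(reg, val));  /* 0x0080 */
    return 0;
}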
diff --git a/hw/intc/heathrow_pic.c b/hw/intc/heathrow_pic.c
new file mode 100644
index 000000000..cb97c315d
--- /dev/null
+++ b/hw/intc/heathrow_pic.c
@@ -0,0 +1,210 @@
+/*
+ * Heathrow PIC support (OldWorld PowerMac)
+ *
+ * Copyright (c) 2005-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ppc/mac.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "hw/intc/heathrow_pic.h"
+#include "hw/irq.h"
+#include "trace.h"
+
+static inline int heathrow_check_irq(HeathrowPICState *pic)
+{
+ return (pic->events | (pic->levels & pic->level_triggered)) & pic->mask;
+}
+
+/* update the CPU irq state */
+static void heathrow_update_irq(HeathrowState *s)
+{
+ if (heathrow_check_irq(&s->pics[0]) ||
+ heathrow_check_irq(&s->pics[1])) {
+ qemu_irq_raise(s->irqs[0]);
+ } else {
+ qemu_irq_lower(s->irqs[0]);
+ }
+}
+
+static void heathrow_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ HeathrowState *s = opaque;
+ HeathrowPICState *pic;
+ unsigned int n;
+
+ n = ((addr & 0xfff) - 0x10) >> 4;
+ trace_heathrow_write(addr, n, value);
+ if (n >= 2)
+ return;
+ pic = &s->pics[n];
+ switch(addr & 0xf) {
+ case 0x04:
+ pic->mask = value;
+ heathrow_update_irq(s);
+ break;
+ case 0x08:
+ /* do not reset level triggered IRQs */
+ value &= ~pic->level_triggered;
+ pic->events &= ~value;
+ heathrow_update_irq(s);
+ break;
+ default:
+ break;
+ }
+}
+
+static uint64_t heathrow_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ HeathrowState *s = opaque;
+ HeathrowPICState *pic;
+ unsigned int n;
+ uint32_t value;
+
+ n = ((addr & 0xfff) - 0x10) >> 4;
+ if (n >= 2) {
+ value = 0;
+ } else {
+ pic = &s->pics[n];
+ switch(addr & 0xf) {
+ case 0x0:
+ value = pic->events;
+ break;
+ case 0x4:
+ value = pic->mask;
+ break;
+ case 0xc:
+ value = pic->levels;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ }
+ trace_heathrow_read(addr, n, value);
+ return value;
+}
+
+static const MemoryRegionOps heathrow_ops = {
+ .read = heathrow_read,
+ .write = heathrow_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void heathrow_set_irq(void *opaque, int num, int level)
+{
+ HeathrowState *s = opaque;
+ HeathrowPICState *pic;
+ unsigned int irq_bit;
+ int last_level;
+
+ pic = &s->pics[1 - (num >> 5)];
+ irq_bit = 1 << (num & 0x1f);
+ last_level = (pic->levels & irq_bit) ? 1 : 0;
+
+ if (level) {
+ pic->events |= irq_bit & ~pic->level_triggered;
+ pic->levels |= irq_bit;
+ } else {
+ pic->levels &= ~irq_bit;
+ }
+
+ if (last_level != level) {
+ trace_heathrow_set_irq(num, level);
+ }
+
+ heathrow_update_irq(s);
+}
+
+static const VMStateDescription vmstate_heathrow_pic_one = {
+ .name = "heathrow_pic_one",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(events, HeathrowPICState),
+ VMSTATE_UINT32(mask, HeathrowPICState),
+ VMSTATE_UINT32(levels, HeathrowPICState),
+ VMSTATE_UINT32(level_triggered, HeathrowPICState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_heathrow = {
+ .name = "heathrow_pic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(pics, HeathrowState, 2, 1,
+ vmstate_heathrow_pic_one, HeathrowPICState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void heathrow_reset(DeviceState *d)
+{
+ HeathrowState *s = HEATHROW(d);
+
+ s->pics[0].level_triggered = 0;
+ s->pics[1].level_triggered = 0x1ff00000;
+}
+
+static void heathrow_init(Object *obj)
+{
+ HeathrowState *s = HEATHROW(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ /* only 1 CPU */
+ qdev_init_gpio_out(DEVICE(obj), s->irqs, 1);
+
+ qdev_init_gpio_in(DEVICE(obj), heathrow_set_irq, HEATHROW_NUM_IRQS);
+
+ memory_region_init_io(&s->mem, OBJECT(s), &heathrow_ops, s,
+ "heathrow-pic", 0x1000);
+ sysbus_init_mmio(sbd, &s->mem);
+}
+
+static void heathrow_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->reset = heathrow_reset;
+ dc->vmsd = &vmstate_heathrow;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo heathrow_type_info = {
+ .name = TYPE_HEATHROW,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(HeathrowState),
+ .instance_init = heathrow_init,
+ .class_init = heathrow_class_init,
+};
+
+static void heathrow_register_types(void)
+{
+ type_register_static(&heathrow_type_info);
+}
+
+type_init(heathrow_register_types)
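heathrow_check_irq() above asserts a bank's output when either a latched edge event or a still-asserted level-triggered line is unmasked. The sketch below restates that test outside the device model; the example bit positions are illustrative only.

/* Standalone restatement of the per-bank IRQ test in heathrow_check_irq(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool bank_has_irq(uint32_t events, uint32_t levels,
                         uint32_t level_triggered, uint32_t mask)
{
    return ((events | (levels & level_triggered)) & mask) != 0;
}

int main(void)
{
    uint32_t level_triggered = 1u << 20;   /* hypothetical level IRQ  */
    uint32_t mask = 1u << 20;              /* only that line unmasked */

    /* Line high and level-triggered: asserted even with no edge event. */
    printf("%d\n", bank_has_irq(0, 1u << 20, level_triggered, mask));
    /* Line dropped again: no longer asserted. */
    printf("%d\n", bank_has_irq(0, 0, level_triggered, mask));
    return 0;
}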
diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c
new file mode 100644
index 000000000..cc4e21ffe
--- /dev/null
+++ b/hw/intc/i8259.c
@@ -0,0 +1,466 @@
+/*
+ * QEMU 8259 interrupt controller emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/i8259.h"
+#include "hw/irq.h"
+#include "hw/isa/isa.h"
+#include "qemu/timer.h"
+#include "qemu/log.h"
+#include "hw/isa/i8259_internal.h"
+#include "trace.h"
+#include "qom/object.h"
+
+/* debug PIC */
+//#define DEBUG_PIC
+
+//#define DEBUG_IRQ_LATENCY
+
+#define TYPE_I8259 "isa-i8259"
+typedef struct PICClass PICClass;
+DECLARE_CLASS_CHECKERS(PICClass, PIC,
+ TYPE_I8259)
+
+/**
+ * PICClass:
+ * @parent_realize: The parent's realizefn.
+ */
+struct PICClass {
+ PICCommonClass parent_class;
+
+ DeviceRealize parent_realize;
+};
+
+#ifdef DEBUG_IRQ_LATENCY
+static int64_t irq_time[16];
+#endif
+DeviceState *isa_pic;
+static PICCommonState *slave_pic;
+
+/* Return the highest priority found in mask (highest = smallest
+ number). Return 8 if no IRQ is pending. */
+static int get_priority(PICCommonState *s, int mask)
+{
+ int priority;
+
+ if (mask == 0) {
+ return 8;
+ }
+ priority = 0;
+ while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0) {
+ priority++;
+ }
+ return priority;
+}
+
+/* Return the interrupt the PIC wants to deliver, or -1 if none. */
+static int pic_get_irq(PICCommonState *s)
+{
+ int mask, cur_priority, priority;
+
+ mask = s->irr & ~s->imr;
+ priority = get_priority(s, mask);
+ if (priority == 8) {
+ return -1;
+ }
+ /* Compute the current priority. If special fully nested mode is on the
+ master, the IRQ coming from the slave is not taken into account
+ for the priority computation. */
+ mask = s->isr;
+ if (s->special_mask) {
+ mask &= ~s->imr;
+ }
+ if (s->special_fully_nested_mode && s->master) {
+ mask &= ~(1 << 2);
+ }
+ cur_priority = get_priority(s, mask);
+ if (priority < cur_priority) {
+ /* higher priority found: an irq should be generated */
+ return (priority + s->priority_add) & 7;
+ } else {
+ return -1;
+ }
+}
+
+/* Update INT output. Must be called every time the output may have changed. */
+static void pic_update_irq(PICCommonState *s)
+{
+ int irq;
+
+ irq = pic_get_irq(s);
+ if (irq >= 0) {
+ trace_pic_update_irq(s->master, s->imr, s->irr, s->priority_add);
+ qemu_irq_raise(s->int_out[0]);
+ } else {
+ qemu_irq_lower(s->int_out[0]);
+ }
+}
+
+/* set irq level. If an edge is detected, then the IRR is set to 1 */
+static void pic_set_irq(void *opaque, int irq, int level)
+{
+ PICCommonState *s = opaque;
+ int mask = 1 << irq;
+ int irq_index = s->master ? irq : irq + 8;
+
+ trace_pic_set_irq(s->master, irq, level);
+ pic_stat_update_irq(irq_index, level);
+
+#ifdef DEBUG_IRQ_LATENCY
+ if (level) {
+ irq_time[irq_index] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ }
+#endif
+
+ if (s->elcr & mask) {
+ /* level triggered */
+ if (level) {
+ s->irr |= mask;
+ s->last_irr |= mask;
+ } else {
+ s->irr &= ~mask;
+ s->last_irr &= ~mask;
+ }
+ } else {
+ /* edge triggered */
+ if (level) {
+ if ((s->last_irr & mask) == 0) {
+ s->irr |= mask;
+ }
+ s->last_irr |= mask;
+ } else {
+ s->last_irr &= ~mask;
+ }
+ }
+ pic_update_irq(s);
+}
+
+/* acknowledge interrupt 'irq' */
+static void pic_intack(PICCommonState *s, int irq)
+{
+ if (s->auto_eoi) {
+ if (s->rotate_on_auto_eoi) {
+ s->priority_add = (irq + 1) & 7;
+ }
+ } else {
+ s->isr |= (1 << irq);
+ }
+ /* We don't clear a level sensitive interrupt here */
+ if (!(s->elcr & (1 << irq))) {
+ s->irr &= ~(1 << irq);
+ }
+ pic_update_irq(s);
+}
+
+int pic_read_irq(DeviceState *d)
+{
+ PICCommonState *s = PIC_COMMON(d);
+ int irq, intno;
+
+ irq = pic_get_irq(s);
+ if (irq >= 0) {
+ int irq2;
+
+ if (irq == 2) {
+ irq2 = pic_get_irq(slave_pic);
+ if (irq2 >= 0) {
+ pic_intack(slave_pic, irq2);
+ } else {
+ /* spurious IRQ on slave controller */
+ irq2 = 7;
+ }
+ intno = slave_pic->irq_base + irq2;
+ pic_intack(s, irq);
+ irq = irq2 + 8;
+ } else {
+ intno = s->irq_base + irq;
+ pic_intack(s, irq);
+ }
+ } else {
+ /* spurious IRQ on host controller */
+ irq = 7;
+ intno = s->irq_base + irq;
+ }
+
+#ifdef DEBUG_IRQ_LATENCY
+ printf("IRQ%d latency=%0.3fus\n",
+ irq,
+ (double)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
+ irq_time[irq]) * 1000000.0 / NANOSECONDS_PER_SECOND);
+#endif
+
+ trace_pic_interrupt(irq, intno);
+ return intno;
+}
+
+static void pic_init_reset(PICCommonState *s)
+{
+ pic_reset_common(s);
+ pic_update_irq(s);
+}
+
+static void pic_reset(DeviceState *dev)
+{
+ PICCommonState *s = PIC_COMMON(dev);
+
+ s->elcr = 0;
+ pic_init_reset(s);
+}
+
+static void pic_ioport_write(void *opaque, hwaddr addr64,
+ uint64_t val64, unsigned size)
+{
+ PICCommonState *s = opaque;
+ uint32_t addr = addr64;
+ uint32_t val = val64;
+ int priority, cmd, irq;
+
+ trace_pic_ioport_write(s->master, addr, val);
+
+ if (addr == 0) {
+ if (val & 0x10) {
+ pic_init_reset(s);
+ s->init_state = 1;
+ s->init4 = val & 1;
+ s->single_mode = val & 2;
+ if (val & 0x08) {
+ qemu_log_mask(LOG_UNIMP,
+ "i8259: level sensitive irq not supported\n");
+ }
+ } else if (val & 0x08) {
+ if (val & 0x04) {
+ s->poll = 1;
+ }
+ if (val & 0x02) {
+ s->read_reg_select = val & 1;
+ }
+ if (val & 0x40) {
+ s->special_mask = (val >> 5) & 1;
+ }
+ } else {
+ cmd = val >> 5;
+ switch (cmd) {
+ case 0:
+ case 4:
+ s->rotate_on_auto_eoi = cmd >> 2;
+ break;
+ case 1: /* end of interrupt */
+ case 5:
+ priority = get_priority(s, s->isr);
+ if (priority != 8) {
+ irq = (priority + s->priority_add) & 7;
+ s->isr &= ~(1 << irq);
+ if (cmd == 5) {
+ s->priority_add = (irq + 1) & 7;
+ }
+ pic_update_irq(s);
+ }
+ break;
+ case 3:
+ irq = val & 7;
+ s->isr &= ~(1 << irq);
+ pic_update_irq(s);
+ break;
+ case 6:
+ s->priority_add = (val + 1) & 7;
+ pic_update_irq(s);
+ break;
+ case 7:
+ irq = val & 7;
+ s->isr &= ~(1 << irq);
+ s->priority_add = (irq + 1) & 7;
+ pic_update_irq(s);
+ break;
+ default:
+ /* no operation */
+ break;
+ }
+ }
+ } else {
+ switch (s->init_state) {
+ case 0:
+ /* normal mode */
+ s->imr = val;
+ pic_update_irq(s);
+ break;
+ case 1:
+ s->irq_base = val & 0xf8;
+ s->init_state = s->single_mode ? (s->init4 ? 3 : 0) : 2;
+ break;
+ case 2:
+ if (s->init4) {
+ s->init_state = 3;
+ } else {
+ s->init_state = 0;
+ }
+ break;
+ case 3:
+ s->special_fully_nested_mode = (val >> 4) & 1;
+ s->auto_eoi = (val >> 1) & 1;
+ s->init_state = 0;
+ break;
+ }
+ }
+}
+
+static uint64_t pic_ioport_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ PICCommonState *s = opaque;
+ int ret;
+
+ if (s->poll) {
+ ret = pic_get_irq(s);
+ if (ret >= 0) {
+ pic_intack(s, ret);
+ ret |= 0x80;
+ } else {
+ ret = 0;
+ }
+ s->poll = 0;
+ } else {
+ if (addr == 0) {
+ if (s->read_reg_select) {
+ ret = s->isr;
+ } else {
+ ret = s->irr;
+ }
+ } else {
+ ret = s->imr;
+ }
+ }
+ trace_pic_ioport_read(s->master, addr, ret);
+ return ret;
+}
+
+int pic_get_output(DeviceState *d)
+{
+ PICCommonState *s = PIC_COMMON(d);
+
+ return (pic_get_irq(s) >= 0);
+}
+
+static void elcr_ioport_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ PICCommonState *s = opaque;
+ s->elcr = val & s->elcr_mask;
+}
+
+static uint64_t elcr_ioport_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ PICCommonState *s = opaque;
+ return s->elcr;
+}
+
+static const MemoryRegionOps pic_base_ioport_ops = {
+ .read = pic_ioport_read,
+ .write = pic_ioport_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static const MemoryRegionOps pic_elcr_ioport_ops = {
+ .read = elcr_ioport_read,
+ .write = elcr_ioport_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static void pic_realize(DeviceState *dev, Error **errp)
+{
+ PICCommonState *s = PIC_COMMON(dev);
+ PICClass *pc = PIC_GET_CLASS(dev);
+
+ memory_region_init_io(&s->base_io, OBJECT(s), &pic_base_ioport_ops, s,
+ "pic", 2);
+ memory_region_init_io(&s->elcr_io, OBJECT(s), &pic_elcr_ioport_ops, s,
+ "elcr", 1);
+
+ qdev_init_gpio_out(dev, s->int_out, ARRAY_SIZE(s->int_out));
+ qdev_init_gpio_in(dev, pic_set_irq, 8);
+
+ pc->parent_realize(dev, errp);
+}
+
+qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq)
+{
+ qemu_irq *irq_set;
+ DeviceState *dev;
+ ISADevice *isadev;
+ int i;
+
+ irq_set = g_new0(qemu_irq, ISA_NUM_IRQS);
+
+ isadev = i8259_init_chip(TYPE_I8259, bus, true);
+ dev = DEVICE(isadev);
+
+ qdev_connect_gpio_out(dev, 0, parent_irq);
+ for (i = 0 ; i < 8; i++) {
+ irq_set[i] = qdev_get_gpio_in(dev, i);
+ }
+
+ isa_pic = dev;
+
+ isadev = i8259_init_chip(TYPE_I8259, bus, false);
+ dev = DEVICE(isadev);
+
+ qdev_connect_gpio_out(dev, 0, irq_set[2]);
+ for (i = 0 ; i < 8; i++) {
+ irq_set[i + 8] = qdev_get_gpio_in(dev, i);
+ }
+
+ slave_pic = PIC_COMMON(dev);
+
+ return irq_set;
+}
+
+static void i8259_class_init(ObjectClass *klass, void *data)
+{
+ PICClass *k = PIC_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_parent_realize(dc, pic_realize, &k->parent_realize);
+ dc->reset = pic_reset;
+}
+
+static const TypeInfo i8259_info = {
+ .name = TYPE_I8259,
+ .instance_size = sizeof(PICCommonState),
+ .parent = TYPE_PIC_COMMON,
+ .class_init = i8259_class_init,
+ .class_size = sizeof(PICClass),
+};
+
+static void pic_register_types(void)
+{
+ type_register_static(&i8259_info);
+}
+
+type_init(pic_register_types)
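get_priority() above implements the 8259's rotating priority scheme: priority 0 corresponds to IRQ (0 + priority_add) & 7, so a rotate-on-EOI of IRQ n demotes it to the lowest priority line. A standalone sketch of that scan, with two example rotations:

/* Standalone sketch of the 8259 rotating priority resolution. */
#include <stdio.h>

static int get_priority(int mask, int priority_add)
{
    int priority = 0;

    if (mask == 0) {
        return 8;                      /* no request pending */
    }
    while ((mask & (1 << ((priority + priority_add) & 7))) == 0) {
        priority++;
    }
    return priority;
}

int main(void)
{
    int irr = (1 << 3) | (1 << 6);     /* IRQ3 and IRQ6 requested */

    /* Default rotation: IRQ0 is highest priority, so IRQ3 wins. */
    int p = get_priority(irr, 0);
    printf("winner: IRQ%d\n", (p + 0) & 7);

    /* After rotating past IRQ4 (priority_add = 5), IRQ6 wins instead. */
    p = get_priority(irr, 5);
    printf("winner: IRQ%d\n", (p + 5) & 7);
    return 0;
}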
diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c
new file mode 100644
index 000000000..d90b40fe4
--- /dev/null
+++ b/hw/intc/i8259_common.c
@@ -0,0 +1,219 @@
+/*
+ * QEMU 8259 - common bits of emulated and KVM kernel model
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2011 Jan Kiszka, Siemens AG
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/i8259.h"
+#include "hw/isa/i8259_internal.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "qapi/error.h"
+
+static int irq_level[16];
+static uint64_t irq_count[16];
+
+void pic_reset_common(PICCommonState *s)
+{
+ s->last_irr = 0;
+ s->irr &= s->elcr;
+ s->imr = 0;
+ s->isr = 0;
+ s->priority_add = 0;
+ s->irq_base = 0;
+ s->read_reg_select = 0;
+ s->poll = 0;
+ s->special_mask = 0;
+ s->init_state = 0;
+ s->auto_eoi = 0;
+ s->rotate_on_auto_eoi = 0;
+ s->special_fully_nested_mode = 0;
+ s->init4 = 0;
+ s->single_mode = 0;
+ /* Note: ELCR is not reset */
+}
+
+static int pic_dispatch_pre_save(void *opaque)
+{
+ PICCommonState *s = opaque;
+ PICCommonClass *info = PIC_COMMON_GET_CLASS(s);
+
+ if (info->pre_save) {
+ info->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int pic_dispatch_post_load(void *opaque, int version_id)
+{
+ PICCommonState *s = opaque;
+ PICCommonClass *info = PIC_COMMON_GET_CLASS(s);
+
+ if (info->post_load) {
+ info->post_load(s);
+ }
+ return 0;
+}
+
+static void pic_common_realize(DeviceState *dev, Error **errp)
+{
+ PICCommonState *s = PIC_COMMON(dev);
+ ISADevice *isa = ISA_DEVICE(dev);
+
+ isa_register_ioport(isa, &s->base_io, s->iobase);
+ if (s->elcr_addr != -1) {
+ isa_register_ioport(isa, &s->elcr_io, s->elcr_addr);
+ }
+
+ qdev_set_legacy_instance_id(dev, s->iobase, 1);
+}
+
+ISADevice *i8259_init_chip(const char *name, ISABus *bus, bool master)
+{
+ DeviceState *dev;
+ ISADevice *isadev;
+
+ isadev = isa_new(name);
+ dev = DEVICE(isadev);
+ qdev_prop_set_uint32(dev, "iobase", master ? 0x20 : 0xa0);
+ qdev_prop_set_uint32(dev, "elcr_addr", master ? 0x4d0 : 0x4d1);
+ qdev_prop_set_uint8(dev, "elcr_mask", master ? 0xf8 : 0xde);
+ qdev_prop_set_bit(dev, "master", master);
+ isa_realize_and_unref(isadev, bus, &error_fatal);
+
+ return isadev;
+}
+
+void pic_stat_update_irq(int irq, int level)
+{
+ if (level != irq_level[irq]) {
+ irq_level[irq] = level;
+ if (level == 1) {
+ irq_count[irq]++;
+ }
+ }
+}
+
+bool pic_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts, unsigned int *nb_irqs)
+{
+ PICCommonState *s = PIC_COMMON(obj);
+
+ if (s->master) {
+ *irq_counts = irq_count;
+ *nb_irqs = ARRAY_SIZE(irq_count);
+ } else {
+ *irq_counts = NULL;
+ *nb_irqs = 0;
+ }
+
+ return true;
+}
+
+void pic_print_info(InterruptStatsProvider *obj, Monitor *mon)
+{
+ PICCommonState *s = PIC_COMMON(obj);
+
+ pic_dispatch_pre_save(s);
+ monitor_printf(mon, "pic%d: irr=%02x imr=%02x isr=%02x hprio=%d "
+ "irq_base=%02x rr_sel=%d elcr=%02x fnm=%d\n",
+ s->master ? 0 : 1, s->irr, s->imr, s->isr, s->priority_add,
+ s->irq_base, s->read_reg_select, s->elcr,
+ s->special_fully_nested_mode);
+}
+
+static const VMStateDescription vmstate_pic_common = {
+ .name = "i8259",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = pic_dispatch_pre_save,
+ .post_load = pic_dispatch_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(last_irr, PICCommonState),
+ VMSTATE_UINT8(irr, PICCommonState),
+ VMSTATE_UINT8(imr, PICCommonState),
+ VMSTATE_UINT8(isr, PICCommonState),
+ VMSTATE_UINT8(priority_add, PICCommonState),
+ VMSTATE_UINT8(irq_base, PICCommonState),
+ VMSTATE_UINT8(read_reg_select, PICCommonState),
+ VMSTATE_UINT8(poll, PICCommonState),
+ VMSTATE_UINT8(special_mask, PICCommonState),
+ VMSTATE_UINT8(init_state, PICCommonState),
+ VMSTATE_UINT8(auto_eoi, PICCommonState),
+ VMSTATE_UINT8(rotate_on_auto_eoi, PICCommonState),
+ VMSTATE_UINT8(special_fully_nested_mode, PICCommonState),
+ VMSTATE_UINT8(init4, PICCommonState),
+ VMSTATE_UINT8(single_mode, PICCommonState),
+ VMSTATE_UINT8(elcr, PICCommonState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property pic_properties_common[] = {
+ DEFINE_PROP_UINT32("iobase", PICCommonState, iobase, -1),
+ DEFINE_PROP_UINT32("elcr_addr", PICCommonState, elcr_addr, -1),
+ DEFINE_PROP_UINT8("elcr_mask", PICCommonState, elcr_mask, -1),
+ DEFINE_PROP_BIT("master", PICCommonState, master, 0, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pic_common_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
+
+ dc->vmsd = &vmstate_pic_common;
+ device_class_set_props(dc, pic_properties_common);
+ dc->realize = pic_common_realize;
+ /*
+ * Reason: unlike ordinary ISA devices, the PICs need additional
+ * wiring: their IRQ input lines are set up by board code, and the
+ * wiring of the slave to the master is hard-coded in device model
+ * code.
+ */
+ dc->user_creatable = false;
+ ic->get_statistics = pic_get_statistics;
+ ic->print_info = pic_print_info;
+}
+
+static const TypeInfo pic_common_type = {
+ .name = TYPE_PIC_COMMON,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(PICCommonState),
+ .class_size = sizeof(PICCommonClass),
+ .class_init = pic_common_class_init,
+ .abstract = true,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
+};
+
+static void pic_common_register_types(void)
+{
+ type_register_static(&pic_common_type);
+}
+
+type_init(pic_common_register_types)
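pic_stat_update_irq() above only increments a line's counter on a 0 -> 1 transition, so a held-high level line is counted once per assertion rather than once per call. A minimal standalone restatement of that edge-counting logic:

/* Standalone sketch of the per-line statistics counter. */
#include <stdint.h>
#include <stdio.h>

static int irq_level[16];
static uint64_t irq_count[16];

static void stat_update_irq(int irq, int level)
{
    if (level != irq_level[irq]) {
        irq_level[irq] = level;
        if (level == 1) {
            irq_count[irq]++;       /* count rising edges only */
        }
    }
}

int main(void)
{
    stat_update_irq(4, 1);   /* rising edge: counted    */
    stat_update_irq(4, 1);   /* still high: not counted */
    stat_update_irq(4, 0);
    stat_update_irq(4, 1);   /* second rising edge      */

    printf("IRQ4 fired %llu times\n", (unsigned long long)irq_count[4]);
    return 0;
}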
diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c
new file mode 100644
index 000000000..63fc602a1
--- /dev/null
+++ b/hw/intc/imx_avic.c
@@ -0,0 +1,366 @@
+/*
+ * i.MX31 Vectored Interrupt Controller
+ *
+ * Note this is NOT the PL192 provided by ARM, but
+ * a custom implementation by Freescale.
+ *
+ * Copyright (c) 2008 OKL
+ * Copyright (c) 2011 NICTA Pty Ltd
+ * Originally written by Hans Jiang
+ * Updated by Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ *
+ * TODO: implement vectors.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/imx_avic.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+#ifndef DEBUG_IMX_AVIC
+#define DEBUG_IMX_AVIC 0
+#endif
+
+#define DPRINTF(fmt, args...) \
+ do { \
+ if (DEBUG_IMX_AVIC) { \
+ fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_AVIC, \
+ __func__, ##args); \
+ } \
+ } while (0)
+
+static const VMStateDescription vmstate_imx_avic = {
+ .name = TYPE_IMX_AVIC,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(pending, IMXAVICState),
+ VMSTATE_UINT64(enabled, IMXAVICState),
+ VMSTATE_UINT64(is_fiq, IMXAVICState),
+ VMSTATE_UINT32(intcntl, IMXAVICState),
+ VMSTATE_UINT32(intmask, IMXAVICState),
+ VMSTATE_UINT32_ARRAY(prio, IMXAVICState, PRIO_WORDS),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static inline int imx_avic_prio(IMXAVICState *s, int irq)
+{
+ uint32_t word = irq / PRIO_PER_WORD;
+ uint32_t part = 4 * (irq % PRIO_PER_WORD);
+ return 0xf & (s->prio[word] >> part);
+}
+
+/* Update interrupts. */
+static void imx_avic_update(IMXAVICState *s)
+{
+ int i;
+ uint64_t new = s->pending & s->enabled;
+ uint64_t flags;
+
+ flags = new & s->is_fiq;
+ qemu_set_irq(s->fiq, !!flags);
+
+ flags = new & ~s->is_fiq;
+ if (!flags || (s->intmask == 0x1f)) {
+ qemu_set_irq(s->irq, !!flags);
+ return;
+ }
+
+ /*
+ * Take interrupt if there's a pending interrupt with
+ * priority higher than the value of intmask
+ */
+ for (i = 0; i < IMX_AVIC_NUM_IRQS; i++) {
+ if (flags & (1UL << i)) {
+ if (imx_avic_prio(s, i) > s->intmask) {
+ qemu_set_irq(s->irq, 1);
+ return;
+ }
+ }
+ }
+ qemu_set_irq(s->irq, 0);
+}
+
+static void imx_avic_set_irq(void *opaque, int irq, int level)
+{
+ IMXAVICState *s = (IMXAVICState *)opaque;
+
+ if (level) {
+ DPRINTF("Raising IRQ %d, prio %d\n",
+ irq, imx_avic_prio(s, irq));
+ s->pending |= (1ULL << irq);
+ } else {
+ DPRINTF("Clearing IRQ %d, prio %d\n",
+ irq, imx_avic_prio(s, irq));
+ s->pending &= ~(1ULL << irq);
+ }
+
+ imx_avic_update(s);
+}
+
+
+static uint64_t imx_avic_read(void *opaque,
+ hwaddr offset, unsigned size)
+{
+ IMXAVICState *s = (IMXAVICState *)opaque;
+
+ DPRINTF("read(offset = 0x%" HWADDR_PRIx ")\n", offset);
+
+ switch (offset >> 2) {
+ case 0: /* INTCNTL */
+ return s->intcntl;
+
+ case 1: /* Normal Interrupt Mask Register, NIMASK */
+ return s->intmask;
+
+ case 2: /* Interrupt Enable Number Register, INTENNUM */
+ case 3: /* Interrupt Disable Number Register, INTDISNUM */
+ return 0;
+
+ case 4: /* Interrupt Enabled Number Register High */
+ return s->enabled >> 32;
+
+ case 5: /* Interrupt Enabled Number Register Low */
+ return s->enabled & 0xffffffffULL;
+
+ case 6: /* Interrupt Type Register High */
+ return s->is_fiq >> 32;
+
+ case 7: /* Interrupt Type Register Low */
+ return s->is_fiq & 0xffffffffULL;
+
+ case 8: /* Normal Interrupt Priority Register 7 */
+ case 9: /* Normal Interrupt Priority Register 6 */
+ case 10:/* Normal Interrupt Priority Register 5 */
+ case 11:/* Normal Interrupt Priority Register 4 */
+ case 12:/* Normal Interrupt Priority Register 3 */
+ case 13:/* Normal Interrupt Priority Register 2 */
+ case 14:/* Normal Interrupt Priority Register 1 */
+ case 15:/* Normal Interrupt Priority Register 0 */
+ return s->prio[15-(offset>>2)];
+
+ case 16: /* Normal interrupt vector and status register */
+ {
+ /*
+ * This returns the highest priority
+ * outstanding interrupt. Where there is more than
+ * one pending IRQ with the same priority,
+ * take the highest numbered one.
+ */
+ uint64_t flags = s->pending & s->enabled & ~s->is_fiq;
+ int i;
+ int prio = -1;
+ int irq = -1;
+ for (i = 63; i >= 0; --i) {
+ if (flags & (1ULL<<i)) {
+ int irq_prio = imx_avic_prio(s, i);
+ if (irq_prio > prio) {
+ irq = i;
+ prio = irq_prio;
+ }
+ }
+ }
+ if (irq >= 0) {
+ imx_avic_set_irq(s, irq, 0);
+ return irq << 16 | prio;
+ }
+ return 0xffffffffULL;
+ }
+ case 17:/* Fast Interrupt vector and status register */
+ {
+ uint64_t flags = s->pending & s->enabled & s->is_fiq;
+ int i = ctz64(flags);
+ if (i < 64) {
+ imx_avic_set_irq(opaque, i, 0);
+ return i;
+ }
+ return 0xffffffffULL;
+ }
+ case 18:/* Interrupt source register high */
+ return s->pending >> 32;
+
+ case 19:/* Interrupt source register low */
+ return s->pending & 0xffffffffULL;
+
+ case 20:/* Interrupt Force Register high */
+ case 21:/* Interrupt Force Register low */
+ return 0;
+
+ case 22:/* Normal Interrupt Pending Register High */
+ return (s->pending & s->enabled & ~s->is_fiq) >> 32;
+
+ case 23:/* Normal Interrupt Pending Register Low */
+ return (s->pending & s->enabled & ~s->is_fiq) & 0xffffffffULL;
+
+ case 24: /* Fast Interrupt Pending Register High */
+ return (s->pending & s->enabled & s->is_fiq) >> 32;
+
+ case 25: /* Fast Interrupt Pending Register Low */
+ return (s->pending & s->enabled & s->is_fiq) & 0xffffffffULL;
+
+ case 0x40: /* AVIC vector 0, use for WFI WAR */
+ return 0x4;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
+ HWADDR_PRIx "\n", TYPE_IMX_AVIC, __func__, offset);
+ return 0;
+ }
+}
+
+static void imx_avic_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ IMXAVICState *s = (IMXAVICState *)opaque;
+
+ /* Vector Registers not yet supported */
+ if (offset >= 0x100 && offset <= 0x2fc) {
+ qemu_log_mask(LOG_UNIMP, "[%s]%s: vector %d ignored\n",
+ TYPE_IMX_AVIC, __func__, (int)((offset - 0x100) >> 2));
+ return;
+ }
+
+ DPRINTF("(0x%" HWADDR_PRIx ") = 0x%x\n", offset, (unsigned int)val);
+
+ switch (offset >> 2) {
+ case 0: /* Interrupt Control Register, INTCNTL */
+ s->intcntl = val & (ABFEN | NIDIS | FIDIS | NIAD | FIAD | NM);
+ if (s->intcntl & ABFEN) {
+ s->intcntl &= ~(val & ABFLAG);
+ }
+ break;
+
+ case 1: /* Normal Interrupt Mask Register, NIMASK */
+ s->intmask = val & 0x1f;
+ break;
+
+ case 2: /* Interrupt Enable Number Register, INTENNUM */
+ DPRINTF("enable(%d)\n", (int)val);
+ val &= 0x3f;
+ s->enabled |= (1ULL << val);
+ break;
+
+ case 3: /* Interrupt Disable Number Register, INTDISNUM */
+ DPRINTF("disable(%d)\n", (int)val);
+ val &= 0x3f;
+ s->enabled &= ~(1ULL << val);
+ break;
+
+ case 4: /* Interrupt Enable Number Register High */
+ s->enabled = (s->enabled & 0xffffffffULL) | (val << 32);
+ break;
+
+ case 5: /* Interrupt Enable Number Register Low */
+ s->enabled = (s->enabled & 0xffffffff00000000ULL) | val;
+ break;
+
+ case 6: /* Interrupt Type Register High */
+ s->is_fiq = (s->is_fiq & 0xffffffffULL) | (val << 32);
+ break;
+
+ case 7: /* Interrupt Type Register Low */
+ s->is_fiq = (s->is_fiq & 0xffffffff00000000ULL) | val;
+ break;
+
+ case 8: /* Normal Interrupt Priority Register 7 */
+ case 9: /* Normal Interrupt Priority Register 6 */
+ case 10:/* Normal Interrupt Priority Register 5 */
+ case 11:/* Normal Interrupt Priority Register 4 */
+ case 12:/* Normal Interrupt Priority Register 3 */
+ case 13:/* Normal Interrupt Priority Register 2 */
+ case 14:/* Normal Interrupt Priority Register 1 */
+ case 15:/* Normal Interrupt Priority Register 0 */
+ s->prio[15-(offset>>2)] = val;
+ break;
+
+ /* Read-only registers, writes ignored */
+ case 16:/* Normal Interrupt Vector and Status register */
+ case 17:/* Fast Interrupt vector and status register */
+ case 18:/* Interrupt source register high */
+ case 19:/* Interrupt source register low */
+ return;
+
+ case 20:/* Interrupt Force Register high */
+ s->pending = (s->pending & 0xffffffffULL) | (val << 32);
+ break;
+
+ case 21:/* Interrupt Force Register low */
+ s->pending = (s->pending & 0xffffffff00000000ULL) | val;
+ break;
+
+ case 22:/* Normal Interrupt Pending Register High */
+ case 23:/* Normal Interrupt Pending Register Low */
+ case 24: /* Fast Interrupt Pending Register High */
+ case 25: /* Fast Interrupt Pending Register Low */
+ return;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
+ HWADDR_PRIx "\n", TYPE_IMX_AVIC, __func__, offset);
+ }
+ imx_avic_update(s);
+}
+
+static const MemoryRegionOps imx_avic_ops = {
+ .read = imx_avic_read,
+ .write = imx_avic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void imx_avic_reset(DeviceState *dev)
+{
+ IMXAVICState *s = IMX_AVIC(dev);
+
+ s->pending = 0;
+ s->enabled = 0;
+ s->is_fiq = 0;
+ s->intmask = 0x1f;
+ s->intcntl = 0;
+ memset(s->prio, 0, sizeof s->prio);
+}
+
+static void imx_avic_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ IMXAVICState *s = IMX_AVIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ memory_region_init_io(&s->iomem, obj, &imx_avic_ops, s,
+ TYPE_IMX_AVIC, 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ qdev_init_gpio_in(dev, imx_avic_set_irq, IMX_AVIC_NUM_IRQS);
+ sysbus_init_irq(sbd, &s->irq);
+ sysbus_init_irq(sbd, &s->fiq);
+}
+
+
+static void imx_avic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_imx_avic;
+ dc->reset = imx_avic_reset;
+ dc->desc = "i.MX Advanced Vector Interrupt Controller";
+}
+
+static const TypeInfo imx_avic_info = {
+ .name = TYPE_IMX_AVIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXAVICState),
+ .instance_init = imx_avic_init,
+ .class_init = imx_avic_class_init,
+};
+
+static void imx_avic_register_types(void)
+{
+ type_register_static(&imx_avic_info);
+}
+
+type_init(imx_avic_register_types)
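
Note: the priority logic above packs one 4-bit priority per interrupt source into the NIPRIORITY words and, on a NIVECSR read, selects the pending source with the highest priority, ties going to the higher IRQ number. A standalone sketch of that encoding and scan (assuming PRIO_PER_WORD is 8 and 64 sources, which is what the register layout above implies; names are illustrative, not the QEMU ones):

#include <stdint.h>
#include <stdio.h>

#define NUM_IRQS      64
#define PRIO_PER_WORD 8
#define PRIO_WORDS    (NUM_IRQS / PRIO_PER_WORD)

static uint32_t prio[PRIO_WORDS];

/* Extract the 4-bit priority nibble for one IRQ. */
static int irq_prio(int irq)
{
    uint32_t word = irq / PRIO_PER_WORD;
    uint32_t part = 4 * (irq % PRIO_PER_WORD);
    return 0xf & (prio[word] >> part);
}

/* Store a 4-bit priority nibble for one IRQ. */
static void set_irq_prio(int irq, int p)
{
    uint32_t word = irq / PRIO_PER_WORD;
    uint32_t part = 4 * (irq % PRIO_PER_WORD);
    prio[word] = (prio[word] & ~(0xfu << part)) | ((uint32_t)(p & 0xf) << part);
}

int main(void)
{
    uint64_t pending = (1ULL << 10) | (1ULL << 42) | (1ULL << 43);
    int best_irq = -1, best_prio = -1;

    set_irq_prio(10, 7);
    set_irq_prio(42, 7);
    set_irq_prio(43, 3);

    /* Same scan order as the NIVECSR read handler: 63 down to 0. */
    for (int i = NUM_IRQS - 1; i >= 0; i--) {
        if ((pending & (1ULL << i)) && irq_prio(i) > best_prio) {
            best_irq = i;
            best_prio = irq_prio(i);
        }
    }
    printf("winner: irq %d prio %d\n", best_irq, best_prio);
    return 0;
}

Compiled as-is this prints "winner: irq 42 prio 7": the two priority-7 sources tie and the downward scan keeps the higher-numbered one, matching the NIVECSR handler above.
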
diff --git a/hw/intc/imx_gpcv2.c b/hw/intc/imx_gpcv2.c
new file mode 100644
index 000000000..237d5f97e
--- /dev/null
+++ b/hw/intc/imx_gpcv2.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018, Impinj, Inc.
+ *
+ * i.MX7 GPCv2 block emulation code
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/imx_gpcv2.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+
+#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
+#define GPC_PU_PGC_SW_PDN_REQ 0x104
+
+#define USB_HSIC_PHY_SW_Pxx_REQ BIT(4)
+#define USB_OTG2_PHY_SW_Pxx_REQ BIT(3)
+#define USB_OTG1_PHY_SW_Pxx_REQ BIT(2)
+#define PCIE_PHY_SW_Pxx_REQ BIT(1)
+#define MIPI_PHY_SW_Pxx_REQ BIT(0)
+
+
+static void imx_gpcv2_reset(DeviceState *dev)
+{
+ IMXGPCv2State *s = IMX_GPCV2(dev);
+
+ memset(s->regs, 0, sizeof(s->regs));
+}
+
+static uint64_t imx_gpcv2_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ IMXGPCv2State *s = opaque;
+
+ return s->regs[offset / sizeof(uint32_t)];
+}
+
+static void imx_gpcv2_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ IMXGPCv2State *s = opaque;
+ const size_t idx = offset / sizeof(uint32_t);
+
+ s->regs[idx] = value;
+
+ /*
+     * Real HW clears those bits once to indicate that the
+     * power-up request is complete
+ */
+ if (offset == GPC_PU_PGC_SW_PUP_REQ ||
+ offset == GPC_PU_PGC_SW_PDN_REQ) {
+ s->regs[idx] &= ~(USB_HSIC_PHY_SW_Pxx_REQ |
+ USB_OTG2_PHY_SW_Pxx_REQ |
+ USB_OTG1_PHY_SW_Pxx_REQ |
+ PCIE_PHY_SW_Pxx_REQ |
+ MIPI_PHY_SW_Pxx_REQ);
+ }
+}
+
+static const struct MemoryRegionOps imx_gpcv2_ops = {
+ .read = imx_gpcv2_read,
+ .write = imx_gpcv2_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ /*
+ * Our device would not work correctly if the guest was doing
+ * unaligned access. This might not be a limitation on the real
+ * device but in practice there is no reason for a guest to access
+ * this device unaligned.
+ */
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+};
+
+static void imx_gpcv2_init(Object *obj)
+{
+ SysBusDevice *sd = SYS_BUS_DEVICE(obj);
+ IMXGPCv2State *s = IMX_GPCV2(obj);
+
+ memory_region_init_io(&s->iomem,
+ obj,
+ &imx_gpcv2_ops,
+ s,
+ TYPE_IMX_GPCV2 ".iomem",
+ sizeof(s->regs));
+ sysbus_init_mmio(sd, &s->iomem);
+}
+
+static const VMStateDescription vmstate_imx_gpcv2 = {
+ .name = TYPE_IMX_GPCV2,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, IMXGPCv2State, GPC_NUM),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void imx_gpcv2_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = imx_gpcv2_reset;
+ dc->vmsd = &vmstate_imx_gpcv2;
+ dc->desc = "i.MX GPCv2 Module";
+}
+
+static const TypeInfo imx_gpcv2_info = {
+ .name = TYPE_IMX_GPCV2,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXGPCv2State),
+ .instance_init = imx_gpcv2_init,
+ .class_init = imx_gpcv2_class_init,
+};
+
+static void imx_gpcv2_register_type(void)
+{
+ type_register_static(&imx_gpcv2_info);
+}
+type_init(imx_gpcv2_register_type)
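
Note: the write path above models power up/down requests as completing instantly: the PHY request bits are cleared on the spot, so a guest polling the request register sees the operation finished. A small standalone sketch of that self-clearing behaviour (the two register offsets are copied from the defines above; the 4 KiB register file and everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
#define GPC_PU_PGC_SW_PDN_REQ 0x104

#define PHY_REQ_BITS (BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0))

static uint32_t regs[0x1000 / sizeof(uint32_t)];

/* Store the value, then clear the PHY request bits as if the request completed. */
static void gpc_write(uint32_t offset, uint32_t value)
{
    regs[offset / sizeof(uint32_t)] = value;
    if (offset == GPC_PU_PGC_SW_PUP_REQ || offset == GPC_PU_PGC_SW_PDN_REQ) {
        regs[offset / sizeof(uint32_t)] &= ~PHY_REQ_BITS;
    }
}

int main(void)
{
    gpc_write(GPC_PU_PGC_SW_PUP_REQ, BIT(2) | BIT(9));
    /* Only the non-PHY bit remains; a guest polling bit 2 sees the request done. */
    printf("PUP_REQ reads back as 0x%x\n",
           (unsigned)regs[GPC_PU_PGC_SW_PUP_REQ / 4]);
    return 0;
}

The printed value is 0x200: bit 9 is preserved while the five PHY request bits never read back as set.
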
diff --git a/hw/intc/intc.c b/hw/intc/intc.c
new file mode 100644
index 000000000..2e1e29e75
--- /dev/null
+++ b/hw/intc/intc.c
@@ -0,0 +1,41 @@
+/*
+ * QEMU Generic Interrupt Controller
+ *
+ * Copyright (c) 2016 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/intc.h"
+#include "qemu/module.h"
+
+static const TypeInfo intctrl_info = {
+ .name = TYPE_INTERRUPT_STATS_PROVIDER,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(InterruptStatsProviderClass),
+};
+
+static void intc_register_types(void)
+{
+ type_register_static(&intctrl_info);
+}
+
+type_init(intc_register_types)
+
diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c
new file mode 100644
index 000000000..264262959
--- /dev/null
+++ b/hw/intc/ioapic.c
@@ -0,0 +1,513 @@
+/*
+ * ioapic.c IOAPIC emulation logic
+ *
+ * Copyright (c) 2004-2005 Fabrice Bellard
+ *
+ * Split the ioapic logic from apic.c
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "monitor/monitor.h"
+#include "hw/i386/apic.h"
+#include "hw/i386/ioapic.h"
+#include "hw/i386/ioapic_internal.h"
+#include "hw/i386/x86.h"
+#include "hw/intc/i8259.h"
+#include "hw/pci/msi.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/kvm.h"
+#include "sysemu/sysemu.h"
+#include "hw/i386/apic-msidef.h"
+#include "hw/i386/x86-iommu.h"
+#include "trace.h"
+
+#define APIC_DELIVERY_MODE_SHIFT 8
+#define APIC_POLARITY_SHIFT 14
+#define APIC_TRIG_MODE_SHIFT 15
+
+static IOAPICCommonState *ioapics[MAX_IOAPICS];
+
+/* global variable from ioapic_common.c */
+extern int ioapic_no;
+
+struct ioapic_entry_info {
+ /* fields parsed from IOAPIC entries */
+ uint8_t masked;
+ uint8_t trig_mode;
+ uint16_t dest_idx;
+ uint8_t dest_mode;
+ uint8_t delivery_mode;
+ uint8_t vector;
+
+ /* MSI message generated from above parsed fields */
+ uint32_t addr;
+ uint32_t data;
+};
+
+static void ioapic_entry_parse(uint64_t entry, struct ioapic_entry_info *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->masked = (entry >> IOAPIC_LVT_MASKED_SHIFT) & 1;
+ info->trig_mode = (entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1;
+ /*
+ * By default, this would be dest_id[8] + reserved[8]. When IR
+ * is enabled, this would be interrupt_index[15] +
+     * interrupt_format[1]. This field is not interpreted here; it is
+     * only used to generate the corresponding MSI.
+ */
+ info->dest_idx = (entry >> IOAPIC_LVT_DEST_IDX_SHIFT) & 0xffff;
+ info->dest_mode = (entry >> IOAPIC_LVT_DEST_MODE_SHIFT) & 1;
+ info->delivery_mode = (entry >> IOAPIC_LVT_DELIV_MODE_SHIFT) \
+ & IOAPIC_DM_MASK;
+ if (info->delivery_mode == IOAPIC_DM_EXTINT) {
+ info->vector = pic_read_irq(isa_pic);
+ } else {
+ info->vector = entry & IOAPIC_VECTOR_MASK;
+ }
+
+ info->addr = APIC_DEFAULT_ADDRESS | \
+ (info->dest_idx << MSI_ADDR_DEST_IDX_SHIFT) | \
+ (info->dest_mode << MSI_ADDR_DEST_MODE_SHIFT);
+ info->data = (info->vector << MSI_DATA_VECTOR_SHIFT) | \
+ (info->trig_mode << MSI_DATA_TRIGGER_SHIFT) | \
+ (info->delivery_mode << MSI_DATA_DELIVERY_MODE_SHIFT);
+}
+
+static void ioapic_service(IOAPICCommonState *s)
+{
+ AddressSpace *ioapic_as = X86_MACHINE(qdev_get_machine())->ioapic_as;
+ struct ioapic_entry_info info;
+ uint8_t i;
+ uint32_t mask;
+ uint64_t entry;
+
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ mask = 1 << i;
+ if (s->irr & mask) {
+ int coalesce = 0;
+
+ entry = s->ioredtbl[i];
+ ioapic_entry_parse(entry, &info);
+ if (!info.masked) {
+ if (info.trig_mode == IOAPIC_TRIGGER_EDGE) {
+ s->irr &= ~mask;
+ } else {
+ coalesce = s->ioredtbl[i] & IOAPIC_LVT_REMOTE_IRR;
+ trace_ioapic_set_remote_irr(i);
+ s->ioredtbl[i] |= IOAPIC_LVT_REMOTE_IRR;
+ }
+
+ if (coalesce) {
+                    /* This is a level-triggered interrupt, and the
+                     * guest should still be working on the previous one,
+                     * so skip it. */
+ continue;
+ }
+
+#ifdef CONFIG_KVM
+ if (kvm_irqchip_is_split()) {
+ if (info.trig_mode == IOAPIC_TRIGGER_EDGE) {
+ kvm_set_irq(kvm_state, i, 1);
+ kvm_set_irq(kvm_state, i, 0);
+ } else {
+ kvm_set_irq(kvm_state, i, 1);
+ }
+ continue;
+ }
+#endif
+
+ /* No matter whether IR is enabled, we translate
+ * the IOAPIC message into a MSI one, and its
+ * address space will decide whether we need a
+ * translation. */
+ stl_le_phys(ioapic_as, info.addr, info.data);
+ }
+ }
+ }
+}
+
+#define SUCCESSIVE_IRQ_MAX_COUNT 10000
+
+static void delayed_ioapic_service_cb(void *opaque)
+{
+ IOAPICCommonState *s = opaque;
+
+ ioapic_service(s);
+}
+
+static void ioapic_set_irq(void *opaque, int vector, int level)
+{
+ IOAPICCommonState *s = opaque;
+
+ /* ISA IRQs map to GSI 1-1 except for IRQ0 which maps
+     * to GSI 2.  GSIs map to IOAPIC pins 1-1.  This is not
+ * the cleanest way of doing it but it should work. */
+
+ trace_ioapic_set_irq(vector, level);
+ ioapic_stat_update_irq(s, vector, level);
+ if (vector == 0) {
+ vector = 2;
+ }
+ if (vector < IOAPIC_NUM_PINS) {
+ uint32_t mask = 1 << vector;
+ uint64_t entry = s->ioredtbl[vector];
+
+ if (((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1) ==
+ IOAPIC_TRIGGER_LEVEL) {
+ /* level triggered */
+ if (level) {
+ s->irr |= mask;
+ if (!(entry & IOAPIC_LVT_REMOTE_IRR)) {
+ ioapic_service(s);
+ }
+ } else {
+ s->irr &= ~mask;
+ }
+ } else {
+ /* According to the 82093AA manual, we must ignore edge requests
+ * if the input pin is masked. */
+ if (level && !(entry & IOAPIC_LVT_MASKED)) {
+ s->irr |= mask;
+ ioapic_service(s);
+ }
+ }
+ }
+}
+
+static void ioapic_update_kvm_routes(IOAPICCommonState *s)
+{
+#ifdef CONFIG_KVM
+ int i;
+
+ if (kvm_irqchip_is_split()) {
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ MSIMessage msg;
+ struct ioapic_entry_info info;
+ ioapic_entry_parse(s->ioredtbl[i], &info);
+ if (!info.masked) {
+ msg.address = info.addr;
+ msg.data = info.data;
+ kvm_irqchip_update_msi_route(kvm_state, i, msg, NULL);
+ }
+ }
+ kvm_irqchip_commit_routes(kvm_state);
+ }
+#endif
+}
+
+#ifdef CONFIG_KVM
+static void ioapic_iec_notifier(void *private, bool global,
+ uint32_t index, uint32_t mask)
+{
+ IOAPICCommonState *s = (IOAPICCommonState *)private;
+ /* For simplicity, we just update all the routes */
+ ioapic_update_kvm_routes(s);
+}
+#endif
+
+void ioapic_eoi_broadcast(int vector)
+{
+ IOAPICCommonState *s;
+ uint64_t entry;
+ int i, n;
+
+ trace_ioapic_eoi_broadcast(vector);
+
+ for (i = 0; i < MAX_IOAPICS; i++) {
+ s = ioapics[i];
+ if (!s) {
+ continue;
+ }
+ for (n = 0; n < IOAPIC_NUM_PINS; n++) {
+ entry = s->ioredtbl[n];
+
+ if ((entry & IOAPIC_VECTOR_MASK) != vector ||
+ ((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1) != IOAPIC_TRIGGER_LEVEL) {
+ continue;
+ }
+
+#ifdef CONFIG_KVM
+ /*
+ * When IOAPIC is in the userspace while APIC is still in
+ * the kernel (i.e., split irqchip), we have a trick to
+ * kick the resamplefd logic for registered irqfds from
+ * userspace to deactivate the IRQ. When that happens, it
+ * means the irq bypassed userspace IOAPIC (so the irr and
+ * remote-irr of the table entry should be bypassed too
+             * even if an interrupt comes). Still kick the resamplefds if
+ * they're bound to the IRQ, to make sure to EOI the
+ * interrupt for the hardware correctly.
+ *
+ * Note: We still need to go through the irr & remote-irr
+             * operations below because we don't know whether there are
+ * emulated devices that are using/sharing the same IRQ.
+ */
+ kvm_resample_fd_notify(n);
+#endif
+
+ if (!(entry & IOAPIC_LVT_REMOTE_IRR)) {
+ continue;
+ }
+
+ trace_ioapic_clear_remote_irr(n, vector);
+ s->ioredtbl[n] = entry & ~IOAPIC_LVT_REMOTE_IRR;
+
+ if (!(entry & IOAPIC_LVT_MASKED) && (s->irr & (1 << n))) {
+ ++s->irq_eoi[n];
+ if (s->irq_eoi[n] >= SUCCESSIVE_IRQ_MAX_COUNT) {
+ /*
+ * Real hardware does not deliver the interrupt immediately
+ * during eoi broadcast, and this lets a buggy guest make
+ * slow progress even if it does not correctly handle a
+ * level-triggered interrupt. Emulate this behavior if we
+ * detect an interrupt storm.
+ */
+ s->irq_eoi[n] = 0;
+ timer_mod_anticipate(s->delayed_ioapic_service_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ NANOSECONDS_PER_SECOND / 100);
+ trace_ioapic_eoi_delayed_reassert(n);
+ } else {
+ ioapic_service(s);
+ }
+ } else {
+ s->irq_eoi[n] = 0;
+ }
+ }
+ }
+}
+
+static uint64_t
+ioapic_mem_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ IOAPICCommonState *s = opaque;
+ int index;
+ uint32_t val = 0;
+
+ addr &= 0xff;
+
+ switch (addr) {
+ case IOAPIC_IOREGSEL:
+ val = s->ioregsel;
+ break;
+ case IOAPIC_IOWIN:
+ if (size != 4) {
+ break;
+ }
+ switch (s->ioregsel) {
+ case IOAPIC_REG_ID:
+ case IOAPIC_REG_ARB:
+ val = s->id << IOAPIC_ID_SHIFT;
+ break;
+ case IOAPIC_REG_VER:
+ val = s->version |
+ ((IOAPIC_NUM_PINS - 1) << IOAPIC_VER_ENTRIES_SHIFT);
+ break;
+ default:
+ index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1;
+ if (index >= 0 && index < IOAPIC_NUM_PINS) {
+ if (s->ioregsel & 1) {
+ val = s->ioredtbl[index] >> 32;
+ } else {
+ val = s->ioredtbl[index] & 0xffffffff;
+ }
+ }
+ }
+ break;
+ }
+
+ trace_ioapic_mem_read(addr, s->ioregsel, size, val);
+
+ return val;
+}
+
+/*
+ * This is to satisfy a hack in the Linux kernel. One of its hacks is to
+ * simulate clearing the Remote IRR bit of IOAPIC entry using the
+ * following:
+ *
+ * "For IO-APIC's with EOI register, we use that to do an explicit EOI.
+ * Otherwise, we simulate the EOI message manually by changing the trigger
+ * mode to edge and then back to level, with RTE being masked during
+ * this."
+ *
+ * (See linux kernel __eoi_ioapic_pin() comment in commit c0205701)
+ *
+ * This is based on the assumption that the Remote IRR bit will be
+ * cleared by the IOAPIC hardware when the entry is configured as
+ * edge-triggered.
+ *
+ * Without this, level-triggered interrupts in IR mode might fail to
+ * work correctly.
+ */
+static inline void
+ioapic_fix_edge_remote_irr(uint64_t *entry)
+{
+ if (!(*entry & IOAPIC_LVT_TRIGGER_MODE)) {
+ /* Edge-triggered interrupts, make sure remote IRR is zero */
+ *entry &= ~((uint64_t)IOAPIC_LVT_REMOTE_IRR);
+ }
+}
+
+static void
+ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned int size)
+{
+ IOAPICCommonState *s = opaque;
+ int index;
+
+ addr &= 0xff;
+ trace_ioapic_mem_write(addr, s->ioregsel, size, val);
+
+ switch (addr) {
+ case IOAPIC_IOREGSEL:
+ s->ioregsel = val;
+ break;
+ case IOAPIC_IOWIN:
+ if (size != 4) {
+ break;
+ }
+ switch (s->ioregsel) {
+ case IOAPIC_REG_ID:
+ s->id = (val >> IOAPIC_ID_SHIFT) & IOAPIC_ID_MASK;
+ break;
+ case IOAPIC_REG_VER:
+ case IOAPIC_REG_ARB:
+ break;
+ default:
+ index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1;
+ if (index >= 0 && index < IOAPIC_NUM_PINS) {
+ uint64_t ro_bits = s->ioredtbl[index] & IOAPIC_RO_BITS;
+ if (s->ioregsel & 1) {
+ s->ioredtbl[index] &= 0xffffffff;
+ s->ioredtbl[index] |= (uint64_t)val << 32;
+ } else {
+ s->ioredtbl[index] &= ~0xffffffffULL;
+ s->ioredtbl[index] |= val;
+ }
+ /* restore RO bits */
+ s->ioredtbl[index] &= IOAPIC_RW_BITS;
+ s->ioredtbl[index] |= ro_bits;
+ s->irq_eoi[index] = 0;
+ ioapic_fix_edge_remote_irr(&s->ioredtbl[index]);
+ ioapic_service(s);
+ }
+ }
+ break;
+ case IOAPIC_EOI:
+ /* Explicit EOI is only supported for IOAPIC version 0x20 */
+ if (size != 4 || s->version != 0x20) {
+ break;
+ }
+ ioapic_eoi_broadcast(val);
+ break;
+ }
+
+ ioapic_update_kvm_routes(s);
+}
+
+static const MemoryRegionOps ioapic_io_ops = {
+ .read = ioapic_mem_read,
+ .write = ioapic_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void ioapic_machine_done_notify(Notifier *notifier, void *data)
+{
+#ifdef CONFIG_KVM
+ IOAPICCommonState *s = container_of(notifier, IOAPICCommonState,
+ machine_done);
+
+ if (kvm_irqchip_is_split()) {
+ X86IOMMUState *iommu = x86_iommu_get_default();
+ if (iommu) {
+ /* Register this IOAPIC with IOMMU IEC notifier, so that
+             * when there are IR invalidations, we can be notified to
+             * update the kernel IR cache. */
+ x86_iommu_iec_register_notifier(iommu, ioapic_iec_notifier, s);
+ }
+ }
+#endif
+}
+
+#define IOAPIC_VER_DEF 0x20
+
+static void ioapic_realize(DeviceState *dev, Error **errp)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(dev);
+
+ if (s->version != 0x11 && s->version != 0x20) {
+ error_setg(errp, "IOAPIC only supports version 0x11 or 0x20 "
+ "(default: 0x%x).", IOAPIC_VER_DEF);
+ return;
+ }
+
+ memory_region_init_io(&s->io_memory, OBJECT(s), &ioapic_io_ops, s,
+ "ioapic", 0x1000);
+
+ s->delayed_ioapic_service_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, delayed_ioapic_service_cb, s);
+
+ qdev_init_gpio_in(dev, ioapic_set_irq, IOAPIC_NUM_PINS);
+
+ ioapics[ioapic_no] = s;
+ s->machine_done.notify = ioapic_machine_done_notify;
+ qemu_add_machine_init_done_notifier(&s->machine_done);
+}
+
+static void ioapic_unrealize(DeviceState *dev)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(dev);
+
+ timer_free(s->delayed_ioapic_service_timer);
+}
+
+static Property ioapic_properties[] = {
+ DEFINE_PROP_UINT8("version", IOAPICCommonState, version, IOAPIC_VER_DEF),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ioapic_class_init(ObjectClass *klass, void *data)
+{
+ IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ k->realize = ioapic_realize;
+ k->unrealize = ioapic_unrealize;
+ /*
+     * If the APIC is in the kernel, we need to update the kernel cache after
+     * migration, otherwise the first 24 GSI routes will be invalid.
+ */
+ k->post_load = ioapic_update_kvm_routes;
+ dc->reset = ioapic_reset_common;
+ device_class_set_props(dc, ioapic_properties);
+}
+
+static const TypeInfo ioapic_info = {
+ .name = TYPE_IOAPIC,
+ .parent = TYPE_IOAPIC_COMMON,
+ .instance_size = sizeof(IOAPICCommonState),
+ .class_init = ioapic_class_init,
+};
+
+static void ioapic_register_types(void)
+{
+ type_register_static(&ioapic_info);
+}
+
+type_init(ioapic_register_types)
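
Note: the core of ioapic_service() above is the translation of a redirection-table entry into an MSI write: the destination field (which, as the ioapic_entry_parse() comment explains, also covers the interrupt-index/format bits when interrupt remapping is on) lands in the MSI address, while vector, delivery mode and trigger mode form the MSI data. A standalone sketch of that translation, using the standard 82093AA entry layout and the usual x86 MSI encoding rather than the exact QEMU header constants:

#include <stdint.h>
#include <stdio.h>

#define APIC_DEFAULT_ADDRESS 0xfee00000u

/* Translate one I/O redirection table entry into an MSI address/data pair. */
static void entry_to_msi(uint64_t entry, uint32_t *addr, uint32_t *data)
{
    uint32_t vector    = entry & 0xff;            /* bits 7:0   */
    uint32_t delivery  = (entry >> 8) & 0x7;      /* bits 10:8  */
    uint32_t dest_mode = (entry >> 11) & 0x1;     /* bit 11     */
    uint32_t trig_mode = (entry >> 15) & 0x1;     /* bit 15     */
    uint32_t dest_idx  = (entry >> 48) & 0xffff;  /* bits 63:48 */

    *addr = APIC_DEFAULT_ADDRESS | (dest_idx << 4) | (dest_mode << 2);
    *data = vector | (delivery << 8) | (trig_mode << 15);
}

int main(void)
{
    /* Level-triggered, fixed delivery, vector 0x31, physical destination 1. */
    uint64_t entry = (1ULL << 56) | (1ULL << 15) | 0x31;
    uint32_t addr, data;

    entry_to_msi(entry, &addr, &data);
    printf("MSI addr=0x%08x data=0x%08x\n", (unsigned)addr, (unsigned)data);
    return 0;
}

This prints addr 0xfee01000 / data 0x8031, which is the kind of write ioapic_service() hands to stl_le_phys(), assuming the usual values of the shift constants.
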
diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c
new file mode 100644
index 000000000..3cccfc155
--- /dev/null
+++ b/hw/intc/ioapic_common.c
@@ -0,0 +1,224 @@
+/*
+ * IOAPIC emulation logic - common bits of emulated and KVM kernel model
+ *
+ * Copyright (c) 2004-2005 Fabrice Bellard
+ * Copyright (c) 2009 Xiantao Zhang, Intel
+ * Copyright (c) 2011 Jan Kiszka, Siemens AG
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "hw/i386/ioapic.h"
+#include "hw/i386/ioapic_internal.h"
+#include "hw/intc/intc.h"
+#include "hw/sysbus.h"
+
+/* ioapic_no counts from 0 up to MAX_IOAPICS. It used to be a static
+ * variable in ioapic_common_init; as a global variable the child device
+ * can increment it, which lets us drop the 'instance_no' argument and
+ * convert to a QOM realize function.
+ */
+int ioapic_no;
+
+void ioapic_stat_update_irq(IOAPICCommonState *s, int irq, int level)
+{
+ if (level != s->irq_level[irq]) {
+ s->irq_level[irq] = level;
+ if (level == 1) {
+ s->irq_count[irq]++;
+ }
+ }
+}
+
+static bool ioapic_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts,
+ unsigned int *nb_irqs)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(obj);
+
+ *irq_counts = s->irq_count;
+ *nb_irqs = IOAPIC_NUM_PINS;
+
+ return true;
+}
+
+static void ioapic_irr_dump(Monitor *mon, const char *name, uint32_t bitmap)
+{
+ int i;
+
+ monitor_printf(mon, "%-10s ", name);
+ if (bitmap == 0) {
+ monitor_printf(mon, "(none)\n");
+ return;
+ }
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ if (bitmap & (1 << i)) {
+ monitor_printf(mon, "%-2u ", i);
+ }
+ }
+ monitor_printf(mon, "\n");
+}
+
+void ioapic_print_redtbl(Monitor *mon, IOAPICCommonState *s)
+{
+ static const char *delm_str[] = {
+ "fixed", "lowest", "SMI", "...", "NMI", "INIT", "...", "extINT"};
+ uint32_t remote_irr = 0;
+ int i;
+
+ monitor_printf(mon, "ioapic0: ver=0x%x id=0x%02x sel=0x%02x",
+ s->version, s->id, s->ioregsel);
+ if (s->ioregsel) {
+ monitor_printf(mon, " (redir[%u])\n",
+ (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1);
+ } else {
+ monitor_printf(mon, "\n");
+ }
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ uint64_t entry = s->ioredtbl[i];
+ uint32_t delm = (uint32_t)((entry & IOAPIC_LVT_DELIV_MODE) >>
+ IOAPIC_LVT_DELIV_MODE_SHIFT);
+ monitor_printf(mon, " pin %-2u 0x%016"PRIx64" dest=%"PRIx64
+ " vec=%-3"PRIu64" %s %-5s %-6s %-6s %s\n",
+ i, entry,
+ (entry >> IOAPIC_LVT_DEST_SHIFT) &
+ (entry & IOAPIC_LVT_DEST_MODE ? 0xff : 0xf),
+ entry & IOAPIC_VECTOR_MASK,
+ entry & IOAPIC_LVT_POLARITY ? "active-lo" : "active-hi",
+ entry & IOAPIC_LVT_TRIGGER_MODE ? "level" : "edge",
+ entry & IOAPIC_LVT_MASKED ? "masked" : "",
+ delm_str[delm],
+ entry & IOAPIC_LVT_DEST_MODE ? "logical" : "physical");
+
+ remote_irr |= entry & IOAPIC_LVT_TRIGGER_MODE ?
+ (entry & IOAPIC_LVT_REMOTE_IRR ? (1 << i) : 0) : 0;
+ }
+ ioapic_irr_dump(mon, " IRR", s->irr);
+ ioapic_irr_dump(mon, " Remote IRR", remote_irr);
+}
+
+void ioapic_reset_common(DeviceState *dev)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(dev);
+ int i;
+
+ s->id = 0;
+ s->ioregsel = 0;
+ s->irr = 0;
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ s->ioredtbl[i] = 1 << IOAPIC_LVT_MASKED_SHIFT;
+ }
+}
+
+static int ioapic_dispatch_pre_save(void *opaque)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(opaque);
+ IOAPICCommonClass *info = IOAPIC_COMMON_GET_CLASS(s);
+
+ if (info->pre_save) {
+ info->pre_save(s);
+ }
+
+ return 0;
+}
+
+static int ioapic_dispatch_post_load(void *opaque, int version_id)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(opaque);
+ IOAPICCommonClass *info = IOAPIC_COMMON_GET_CLASS(s);
+
+ if (info->post_load) {
+ info->post_load(s);
+ }
+ return 0;
+}
+
+static void ioapic_common_realize(DeviceState *dev, Error **errp)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(dev);
+ IOAPICCommonClass *info;
+
+ if (ioapic_no >= MAX_IOAPICS) {
+ error_setg(errp, "Only %d ioapics allowed", MAX_IOAPICS);
+ return;
+ }
+
+ info = IOAPIC_COMMON_GET_CLASS(s);
+ info->realize(dev, errp);
+
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io_memory);
+ ioapic_no++;
+}
+
+static void ioapic_print_info(InterruptStatsProvider *obj,
+ Monitor *mon)
+{
+ IOAPICCommonState *s = IOAPIC_COMMON(obj);
+
+ ioapic_dispatch_pre_save(s);
+ ioapic_print_redtbl(mon, s);
+}
+
+static const VMStateDescription vmstate_ioapic_common = {
+ .name = "ioapic",
+ .version_id = 3,
+ .minimum_version_id = 1,
+ .pre_save = ioapic_dispatch_pre_save,
+ .post_load = ioapic_dispatch_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(id, IOAPICCommonState),
+ VMSTATE_UINT8(ioregsel, IOAPICCommonState),
+ VMSTATE_UNUSED_V(2, 8), /* to account for qemu-kvm's v2 format */
+ VMSTATE_UINT32_V(irr, IOAPICCommonState, 2),
+ VMSTATE_UINT64_ARRAY(ioredtbl, IOAPICCommonState, IOAPIC_NUM_PINS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void ioapic_common_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
+
+ dc->realize = ioapic_common_realize;
+ dc->vmsd = &vmstate_ioapic_common;
+ ic->print_info = ioapic_print_info;
+ ic->get_statistics = ioapic_get_statistics;
+}
+
+static const TypeInfo ioapic_common_type = {
+ .name = TYPE_IOAPIC_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IOAPICCommonState),
+ .class_size = sizeof(IOAPICCommonClass),
+ .class_init = ioapic_common_class_init,
+ .abstract = true,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
+};
+
+static void ioapic_common_register_types(void)
+{
+ type_register_static(&ioapic_common_type);
+}
+
+type_init(ioapic_common_register_types)
diff --git a/hw/intc/loongson_liointc.c b/hw/intc/loongson_liointc.c
new file mode 100644
index 000000000..cc11b544c
--- /dev/null
+++ b/hw/intc/loongson_liointc.c
@@ -0,0 +1,249 @@
+/*
+ * QEMU Loongson Local I/O interrupt controller.
+ *
+ * Copyright (c) 2020 Huacai Chen <chenhc@lemote.com>
+ * Copyright (c) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qemu/log.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/loongson_liointc.h"
+
+#define NUM_IRQS 32
+
+#define NUM_CORES 4
+#define NUM_IPS 4
+#define NUM_PARENTS (NUM_CORES * NUM_IPS)
+#define PARENT_COREx_IPy(x, y) (NUM_IPS * x + y)
+
+#define R_MAPPER_START 0x0
+#define R_MAPPER_END 0x20
+#define R_ISR R_MAPPER_END
+#define R_IEN 0x24
+#define R_IEN_SET 0x28
+#define R_IEN_CLR 0x2c
+#define R_ISR_SIZE 0x8
+#define R_START 0x40
+#define R_END (R_START + R_ISR_SIZE * NUM_CORES)
+
+struct loongson_liointc {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+ qemu_irq parent_irq[NUM_PARENTS];
+
+ uint8_t mapper[NUM_IRQS]; /* 0:3 for core, 4:7 for IP */
+ uint32_t isr;
+ uint32_t ien;
+ uint32_t per_core_isr[NUM_CORES];
+
+ /* state of the interrupt input pins */
+ uint32_t pin_state;
+ bool parent_state[NUM_PARENTS];
+};
+
+static void update_irq(struct loongson_liointc *p)
+{
+ uint32_t irq, core, ip;
+ uint32_t per_ip_isr[NUM_IPS] = {0};
+
+ /* level triggered interrupt */
+ p->isr = p->pin_state;
+
+ /* Clear disabled IRQs */
+ p->isr &= p->ien;
+
+ /* Clear per_core_isr */
+ for (core = 0; core < NUM_CORES; core++) {
+ p->per_core_isr[core] = 0;
+ }
+
+ /* Update per_core_isr and per_ip_isr */
+ for (irq = 0; irq < NUM_IRQS; irq++) {
+ if (!(p->isr & (1 << irq))) {
+ continue;
+ }
+
+ for (core = 0; core < NUM_CORES; core++) {
+ if ((p->mapper[irq] & (1 << core))) {
+ p->per_core_isr[core] |= (1 << irq);
+ }
+ }
+
+ for (ip = 0; ip < NUM_IPS; ip++) {
+ if ((p->mapper[irq] & (1 << (ip + 4)))) {
+ per_ip_isr[ip] |= (1 << irq);
+ }
+ }
+ }
+
+ /* Emit IRQ to parent! */
+ for (core = 0; core < NUM_CORES; core++) {
+ for (ip = 0; ip < NUM_IPS; ip++) {
+ int parent = PARENT_COREx_IPy(core, ip);
+ if (p->parent_state[parent] !=
+ (!!p->per_core_isr[core] && !!per_ip_isr[ip])) {
+ p->parent_state[parent] = !p->parent_state[parent];
+ qemu_set_irq(p->parent_irq[parent], p->parent_state[parent]);
+ }
+ }
+ }
+}
+
+static uint64_t
+liointc_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ struct loongson_liointc *p = opaque;
+ uint32_t r = 0;
+
+ /* Mapper is 1 byte */
+ if (size == 1 && addr < R_MAPPER_END) {
+ r = p->mapper[addr];
+ goto out;
+ }
+
+ /* Rest are 4 bytes */
+ if (size != 4 || (addr % 4)) {
+ goto out;
+ }
+
+ if (addr >= R_START && addr < R_END) {
+ hwaddr offset = addr - R_START;
+ int core = offset / R_ISR_SIZE;
+
+ if (offset % R_ISR_SIZE) {
+ goto out;
+ }
+ r = p->per_core_isr[core];
+ goto out;
+ }
+
+ switch (addr) {
+ case R_ISR:
+ r = p->isr;
+ break;
+ case R_IEN:
+ r = p->ien;
+ break;
+ default:
+ break;
+ }
+
+out:
+ qemu_log_mask(CPU_LOG_INT, "%s: size=%d, addr=%"HWADDR_PRIx", val=%x\n",
+ __func__, size, addr, r);
+ return r;
+}
+
+static void
+liointc_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ struct loongson_liointc *p = opaque;
+ uint32_t value = val64;
+
+ qemu_log_mask(CPU_LOG_INT, "%s: size=%d, addr=%"HWADDR_PRIx", val=%x\n",
+ __func__, size, addr, value);
+
+ /* Mapper is 1 byte */
+ if (size == 1 && addr < R_MAPPER_END) {
+ p->mapper[addr] = value;
+ goto out;
+ }
+
+ /* Rest are 4 bytes */
+ if (size != 4 || (addr % 4)) {
+ goto out;
+ }
+
+ if (addr >= R_START && addr < R_END) {
+ hwaddr offset = addr - R_START;
+ int core = offset / R_ISR_SIZE;
+
+ if (offset % R_ISR_SIZE) {
+ goto out;
+ }
+ p->per_core_isr[core] = value;
+ goto out;
+ }
+
+ switch (addr) {
+ case R_IEN_SET:
+ p->ien |= value;
+ break;
+ case R_IEN_CLR:
+ p->ien &= ~value;
+ break;
+ default:
+ break;
+ }
+
+out:
+ update_irq(p);
+}
+
+static const MemoryRegionOps pic_ops = {
+ .read = liointc_read,
+ .write = liointc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4
+ }
+};
+
+static void irq_handler(void *opaque, int irq, int level)
+{
+ struct loongson_liointc *p = opaque;
+
+ p->pin_state &= ~(1 << irq);
+ p->pin_state |= level << irq;
+ update_irq(p);
+}
+
+static void loongson_liointc_init(Object *obj)
+{
+ struct loongson_liointc *p = LOONGSON_LIOINTC(obj);
+ int i;
+
+ qdev_init_gpio_in(DEVICE(obj), irq_handler, 32);
+
+ for (i = 0; i < NUM_PARENTS; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &p->parent_irq[i]);
+ }
+
+ memory_region_init_io(&p->mmio, obj, &pic_ops, p,
+ TYPE_LOONGSON_LIOINTC, R_END);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio);
+}
+
+static const TypeInfo loongson_liointc_info = {
+ .name = TYPE_LOONGSON_LIOINTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(struct loongson_liointc),
+ .instance_init = loongson_liointc_init,
+};
+
+static void loongson_liointc_register_types(void)
+{
+ type_register_static(&loongson_liointc_info);
+}
+
+type_init(loongson_liointc_register_types)
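
Note: in update_irq() above, each mapper[] byte routes one source: bits 0:3 pick the destination core(s) and bits 4:7 pick the parent interrupt pin (IP), and a core/IP output is asserted when some enabled, pending source is routed to that core and to that IP. A standalone sketch of that routing rule (the constants mirror the defines above; the scenario itself is made up):

#include <stdint.h>
#include <stdio.h>

#define NUM_IRQS  32
#define NUM_CORES 4
#define NUM_IPS   4

int main(void)
{
    uint8_t  mapper[NUM_IRQS] = { 0 };
    uint32_t isr = 1u << 5;                  /* source 5 pending and enabled */
    uint32_t per_core_isr[NUM_CORES] = { 0 };
    uint32_t per_ip_isr[NUM_IPS] = { 0 };

    mapper[5] = (1 << 1) | (1 << (4 + 2));   /* route IRQ 5 to core 1, IP 2 */

    /* Build the per-core and per-IP pending sets, as update_irq() does. */
    for (int irq = 0; irq < NUM_IRQS; irq++) {
        if (!(isr & (1u << irq))) {
            continue;
        }
        for (int core = 0; core < NUM_CORES; core++) {
            if (mapper[irq] & (1 << core)) {
                per_core_isr[core] |= 1u << irq;
            }
        }
        for (int ip = 0; ip < NUM_IPS; ip++) {
            if (mapper[irq] & (1 << (ip + 4))) {
                per_ip_isr[ip] |= 1u << irq;
            }
        }
    }

    for (int core = 0; core < NUM_CORES; core++) {
        for (int ip = 0; ip < NUM_IPS; ip++) {
            if (per_core_isr[core] && per_ip_isr[ip]) {
                printf("assert parent line core%d/IP%d\n", core, ip);
            }
        }
    }
    return 0;
}

This prints one assertion, for core1/IP2; the device model additionally remembers the previous output level in parent_state[] so that qemu_set_irq() is only called when a parent line actually changes.
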
diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c
new file mode 100644
index 000000000..0c515e4ec
--- /dev/null
+++ b/hw/intc/m68k_irqc.c
@@ -0,0 +1,119 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * QEMU Motorola 680x0 IRQ Controller
+ *
+ * (c) 2020 Laurent Vivier <laurent@vivier.eu>
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "hw/nmi.h"
+#include "hw/intc/intc.h"
+#include "hw/intc/m68k_irqc.h"
+
+
+static bool m68k_irqc_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts, unsigned int *nb_irqs)
+{
+ M68KIRQCState *s = M68K_IRQC(obj);
+
+ *irq_counts = s->stats_irq_count;
+ *nb_irqs = ARRAY_SIZE(s->stats_irq_count);
+ return true;
+}
+
+static void m68k_irqc_print_info(InterruptStatsProvider *obj, Monitor *mon)
+{
+ M68KIRQCState *s = M68K_IRQC(obj);
+ monitor_printf(mon, "m68k-irqc: ipr=0x%x\n", s->ipr);
+}
+
+static void m68k_set_irq(void *opaque, int irq, int level)
+{
+ M68KIRQCState *s = opaque;
+ M68kCPU *cpu = M68K_CPU(first_cpu);
+ int i;
+
+ if (level) {
+ s->ipr |= 1 << irq;
+ s->stats_irq_count[irq]++;
+ } else {
+ s->ipr &= ~(1 << irq);
+ }
+
+ for (i = M68K_IRQC_LEVEL_7; i >= M68K_IRQC_LEVEL_1; i--) {
+ if ((s->ipr >> i) & 1) {
+ m68k_set_irq_level(cpu, i + 1, i + M68K_IRQC_AUTOVECTOR_BASE);
+ return;
+ }
+ }
+ m68k_set_irq_level(cpu, 0, 0);
+}
+
+static void m68k_irqc_reset(DeviceState *d)
+{
+ M68KIRQCState *s = M68K_IRQC(d);
+ int i;
+
+ s->ipr = 0;
+ for (i = 0; i < ARRAY_SIZE(s->stats_irq_count); i++) {
+ s->stats_irq_count[i] = 0;
+ }
+}
+
+static void m68k_irqc_instance_init(Object *obj)
+{
+ qdev_init_gpio_in(DEVICE(obj), m68k_set_irq, M68K_IRQC_LEVEL_NUM);
+}
+
+static void m68k_nmi(NMIState *n, int cpu_index, Error **errp)
+{
+ m68k_set_irq(n, M68K_IRQC_LEVEL_7, 1);
+}
+
+static const VMStateDescription vmstate_m68k_irqc = {
+ .name = "m68k-irqc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(ipr, M68KIRQCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void m68k_irqc_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ NMIClass *nc = NMI_CLASS(oc);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc);
+
+ nc->nmi_monitor_handler = m68k_nmi;
+ dc->reset = m68k_irqc_reset;
+ dc->vmsd = &vmstate_m68k_irqc;
+ ic->get_statistics = m68k_irqc_get_statistics;
+ ic->print_info = m68k_irqc_print_info;
+}
+
+static const TypeInfo m68k_irqc_type_info = {
+ .name = TYPE_M68K_IRQC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(M68KIRQCState),
+ .instance_init = m68k_irqc_instance_init,
+ .class_init = m68k_irqc_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_NMI },
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
+};
+
+static void q800_irq_register_types(void)
+{
+ type_register_static(&m68k_irqc_type_info);
+}
+
+type_init(q800_irq_register_types);
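
Note: m68k_set_irq() above keeps a pending-level bitmap (IPR) in which bit i stands for interrupt level i + 1; the highest set bit is delivered to the CPU together with its autovector. A standalone sketch of that resolution step (the autovector base of 25 for level 1 follows the usual m68k layout and is assumed here rather than taken from the header):

#include <stdint.h>
#include <stdio.h>

#define LEVEL_NUM        7
#define AUTOVECTOR_BASE  25   /* vector for level 1; illustrative value */

/* Deliver the highest pending level, or drop the request if none is pending. */
static void resolve(uint8_t ipr)
{
    for (int i = LEVEL_NUM - 1; i >= 0; i--) {
        if ((ipr >> i) & 1) {
            printf("deliver level %d, vector %d\n", i + 1, i + AUTOVECTOR_BASE);
            return;
        }
    }
    printf("no interrupt pending\n");
}

int main(void)
{
    resolve(0);                      /* nothing pending */
    resolve((1 << 2) | (1 << 5));    /* levels 3 and 6 pending: level 6 wins */
    return 0;
}

Run as-is, the second call reports level 6 with vector 30, mirroring the downward scan in m68k_set_irq().
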
diff --git a/hw/intc/meson.build b/hw/intc/meson.build
new file mode 100644
index 000000000..c89d2ca18
--- /dev/null
+++ b/hw/intc/meson.build
@@ -0,0 +1,59 @@
+softmmu_ss.add(files('intc.c'))
+softmmu_ss.add(when: 'CONFIG_ARM_GIC', if_true: files(
+ 'arm_gic.c',
+ 'arm_gic_common.c',
+ 'arm_gicv2m.c',
+ 'arm_gicv3.c',
+ 'arm_gicv3_common.c',
+ 'arm_gicv3_dist.c',
+ 'arm_gicv3_its_common.c',
+ 'arm_gicv3_redist.c',
+ 'arm_gicv3_its.c',
+))
+softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c'))
+softmmu_ss.add(when: 'CONFIG_HEATHROW_PIC', if_true: files('heathrow_pic.c'))
+softmmu_ss.add(when: 'CONFIG_I8259', if_true: files('i8259_common.c', 'i8259.c'))
+softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_avic.c', 'imx_gpcv2.c'))
+softmmu_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic_common.c'))
+softmmu_ss.add(when: 'CONFIG_OPENPIC', if_true: files('openpic.c'))
+softmmu_ss.add(when: 'CONFIG_PL190', if_true: files('pl190.c'))
+softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_gic.c'))
+softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_intctl.c'))
+softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_intc.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-ipi.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_PMU', if_true: files('xlnx-pmu-iomod-intc.c'))
+
+specific_ss.add(when: 'CONFIG_ALLWINNER_A10_PIC', if_true: files('allwinner-a10-pic.c'))
+specific_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c', 'apic_common.c'))
+specific_ss.add(when: 'CONFIG_ARM_GIC', if_true: files('arm_gicv3_cpuif.c'))
+specific_ss.add(when: 'CONFIG_ARM_GIC_KVM', if_true: files('arm_gic_kvm.c'))
+specific_ss.add(when: ['CONFIG_ARM_GIC_KVM', 'TARGET_AARCH64'], if_true: files('arm_gicv3_kvm.c', 'arm_gicv3_its_kvm.c'))
+specific_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_nvic.c'))
+specific_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_vic.c'))
+specific_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_gic.c', 'exynos4210_combiner.c'))
+specific_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_irqmp.c'))
+specific_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic.c'))
+specific_ss.add(when: 'CONFIG_LOONGSON_LIOINTC', if_true: files('loongson_liointc.c'))
+specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gic.c'))
+specific_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_intc.c'))
+specific_ss.add(when: 'CONFIG_OMPIC', if_true: files('ompic.c'))
+specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_OPENPIC'],
+ if_true: files('openpic_kvm.c'))
+specific_ss.add(when: 'CONFIG_POWERNV', if_true: files('xics_pnv.c', 'pnv_xive.c'))
+specific_ss.add(when: 'CONFIG_PPC_UIC', if_true: files('ppc-uic.c'))
+specific_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_control.c'))
+specific_ss.add(when: 'CONFIG_RX_ICU', if_true: files('rx_icu.c'))
+specific_ss.add(when: 'CONFIG_S390_FLIC', if_true: files('s390_flic.c'))
+specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c'))
+specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
+specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c'))
+specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
+specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c'))
+specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'],
+ if_true: files('xics_kvm.c'))
+specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('xics_spapr.c', 'spapr_xive.c'))
+specific_ss.add(when: 'CONFIG_XIVE', if_true: files('xive.c'))
+specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XIVE'],
+ if_true: files('spapr_xive_kvm.c'))
+specific_ss.add(when: 'CONFIG_GOLDFISH_PIC', if_true: files('goldfish_pic.c'))
+specific_ss.add(when: 'CONFIG_M68K_IRQC', if_true: files('m68k_irqc.c'))
diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c
new file mode 100644
index 000000000..bda454992
--- /dev/null
+++ b/hw/intc/mips_gic.c
@@ -0,0 +1,468 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ *
+ * Copyright (C) 2016 Imagination Technologies
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "hw/sysbus.h"
+#include "exec/memory.h"
+#include "sysemu/kvm.h"
+#include "sysemu/reset.h"
+#include "kvm_mips.h"
+#include "hw/intc/mips_gic.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+
+static void mips_gic_set_vp_irq(MIPSGICState *gic, int vp, int pin)
+{
+ int ored_level = 0;
+ int i;
+
+    /* ORing pending registers sharing the same pin */
+ for (i = 0; i < gic->num_irq; i++) {
+ if ((gic->irq_state[i].map_pin & GIC_MAP_MSK) == pin &&
+ gic->irq_state[i].map_vp == vp &&
+ gic->irq_state[i].enabled) {
+ ored_level |= gic->irq_state[i].pending;
+ }
+ if (ored_level) {
+ /* no need to iterate all interrupts */
+ break;
+ }
+ }
+ if (((gic->vps[vp].compare_map & GIC_MAP_MSK) == pin) &&
+ (gic->vps[vp].mask & GIC_VP_MASK_CMP_MSK)) {
+ /* ORing with local pending register (count/compare) */
+ ored_level |= (gic->vps[vp].pend & GIC_VP_MASK_CMP_MSK) >>
+ GIC_VP_MASK_CMP_SHF;
+ }
+ if (kvm_enabled()) {
+ kvm_mips_set_ipi_interrupt(env_archcpu(gic->vps[vp].env),
+ pin + GIC_CPU_PIN_OFFSET,
+ ored_level);
+ } else {
+ qemu_set_irq(gic->vps[vp].env->irq[pin + GIC_CPU_PIN_OFFSET],
+ ored_level);
+ }
+}
+
+static void gic_update_pin_for_irq(MIPSGICState *gic, int n_IRQ)
+{
+ int vp = gic->irq_state[n_IRQ].map_vp;
+ int pin = gic->irq_state[n_IRQ].map_pin & GIC_MAP_MSK;
+
+ if (vp < 0 || vp >= gic->num_vps) {
+ return;
+ }
+ mips_gic_set_vp_irq(gic, vp, pin);
+}
+
+static void gic_set_irq(void *opaque, int n_IRQ, int level)
+{
+ MIPSGICState *gic = (MIPSGICState *) opaque;
+
+ gic->irq_state[n_IRQ].pending = (uint8_t) level;
+ if (!gic->irq_state[n_IRQ].enabled) {
+ /* GIC interrupt source disabled */
+ return;
+ }
+ gic_update_pin_for_irq(gic, n_IRQ);
+}
+
+#define OFFSET_CHECK(c) \
+ do { \
+ if (!(c)) { \
+ goto bad_offset; \
+ } \
+ } while (0)
+
+/* GIC Read VP Local/Other Registers */
+static uint64_t gic_read_vp(MIPSGICState *gic, uint32_t vp_index, hwaddr addr,
+ unsigned size)
+{
+ switch (addr) {
+ case GIC_VP_CTL_OFS:
+ return gic->vps[vp_index].ctl;
+ case GIC_VP_PEND_OFS:
+ mips_gictimer_get_sh_count(gic->gic_timer);
+ return gic->vps[vp_index].pend;
+ case GIC_VP_MASK_OFS:
+ return gic->vps[vp_index].mask;
+ case GIC_VP_COMPARE_MAP_OFS:
+ return gic->vps[vp_index].compare_map;
+ case GIC_VP_OTHER_ADDR_OFS:
+ return gic->vps[vp_index].other_addr;
+ case GIC_VP_IDENT_OFS:
+ return vp_index;
+ case GIC_VP_COMPARE_LO_OFS:
+ return mips_gictimer_get_vp_compare(gic->gic_timer, vp_index);
+ case GIC_VP_COMPARE_HI_OFS:
+ return 0;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Read %d bytes at GIC offset LOCAL/OTHER 0x%"
+ PRIx64 "\n", size, addr);
+ break;
+ }
+ return 0;
+}
+
+static uint64_t gic_read(void *opaque, hwaddr addr, unsigned size)
+{
+ MIPSGICState *gic = (MIPSGICState *) opaque;
+ uint32_t vp_index = current_cpu->cpu_index;
+ uint64_t ret = 0;
+ int i, base, irq_src;
+ uint32_t other_index;
+
+ switch (addr) {
+ case GIC_SH_CONFIG_OFS:
+ ret = gic->sh_config | (mips_gictimer_get_countstop(gic->gic_timer) <<
+ GIC_SH_CONFIG_COUNTSTOP_SHF);
+ break;
+ case GIC_SH_COUNTERLO_OFS:
+ ret = mips_gictimer_get_sh_count(gic->gic_timer);
+ break;
+ case GIC_SH_COUNTERHI_OFS:
+ ret = 0;
+ break;
+ case GIC_SH_PEND_OFS ... GIC_SH_PEND_LAST_OFS:
+ /* each bit represents pending status for an interrupt pin */
+ base = (addr - GIC_SH_PEND_OFS) * 8;
+ OFFSET_CHECK((base + size * 8) <= gic->num_irq);
+ for (i = 0; i < size * 8; i++) {
+ ret |= (uint64_t) (gic->irq_state[base + i].pending) << i;
+ }
+ break;
+ case GIC_SH_MASK_OFS ... GIC_SH_MASK_LAST_OFS:
+ /* each bit represents status for an interrupt pin */
+ base = (addr - GIC_SH_MASK_OFS) * 8;
+ OFFSET_CHECK((base + size * 8) <= gic->num_irq);
+ for (i = 0; i < size * 8; i++) {
+ ret |= (uint64_t) (gic->irq_state[base + i].enabled) << i;
+ }
+ break;
+ case GIC_SH_MAP0_PIN_OFS ... GIC_SH_MAP255_PIN_OFS:
+        /* 32 bits per pin */
+ irq_src = (addr - GIC_SH_MAP0_PIN_OFS) / 4;
+ OFFSET_CHECK(irq_src < gic->num_irq);
+ ret = gic->irq_state[irq_src].map_pin;
+ break;
+ case GIC_SH_MAP0_VP_OFS ... GIC_SH_MAP255_VP_LAST_OFS:
+        /* up to 32 bytes per pin */
+ irq_src = (addr - GIC_SH_MAP0_VP_OFS) / 32;
+ OFFSET_CHECK(irq_src < gic->num_irq);
+ if ((gic->irq_state[irq_src].map_vp) >= 0) {
+ ret = (uint64_t) 1 << (gic->irq_state[irq_src].map_vp);
+ } else {
+ ret = 0;
+ }
+ break;
+ /* VP-Local Register */
+ case VP_LOCAL_SECTION_OFS ... (VP_LOCAL_SECTION_OFS + GIC_VL_BRK_GROUP):
+ ret = gic_read_vp(gic, vp_index, addr - VP_LOCAL_SECTION_OFS, size);
+ break;
+ /* VP-Other Register */
+ case VP_OTHER_SECTION_OFS ... (VP_OTHER_SECTION_OFS + GIC_VL_BRK_GROUP):
+ other_index = gic->vps[vp_index].other_addr;
+ ret = gic_read_vp(gic, other_index, addr - VP_OTHER_SECTION_OFS, size);
+ break;
+ /* User-Mode Visible section */
+ case USM_VISIBLE_SECTION_OFS + GIC_USER_MODE_COUNTERLO:
+ ret = mips_gictimer_get_sh_count(gic->gic_timer);
+ break;
+ case USM_VISIBLE_SECTION_OFS + GIC_USER_MODE_COUNTERHI:
+ ret = 0;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Read %d bytes at GIC offset 0x%" PRIx64 "\n",
+ size, addr);
+ break;
+ }
+ return ret;
+bad_offset:
+ qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr);
+ return 0;
+}
+
+static void gic_timer_expire_cb(void *opaque, uint32_t vp_index)
+{
+ MIPSGICState *gic = opaque;
+
+ gic->vps[vp_index].pend |= (1 << GIC_LOCAL_INT_COMPARE);
+ if (gic->vps[vp_index].pend &
+ (gic->vps[vp_index].mask & GIC_VP_MASK_CMP_MSK)) {
+ if (gic->vps[vp_index].compare_map & GIC_MAP_TO_PIN_MSK) {
+ /* it is safe to set the irq high regardless of other GIC IRQs */
+ uint32_t pin = (gic->vps[vp_index].compare_map & GIC_MAP_MSK);
+ qemu_irq_raise(gic->vps[vp_index].env->irq
+ [pin + GIC_CPU_PIN_OFFSET]);
+ }
+ }
+}
+
+static void gic_timer_store_vp_compare(MIPSGICState *gic, uint32_t vp_index,
+ uint64_t compare)
+{
+ gic->vps[vp_index].pend &= ~(1 << GIC_LOCAL_INT_COMPARE);
+ if (gic->vps[vp_index].compare_map & GIC_MAP_TO_PIN_MSK) {
+ uint32_t pin = (gic->vps[vp_index].compare_map & GIC_MAP_MSK);
+ mips_gic_set_vp_irq(gic, vp_index, pin);
+ }
+ mips_gictimer_store_vp_compare(gic->gic_timer, vp_index, compare);
+}
+
+/* GIC Write VP Local/Other Registers */
+static void gic_write_vp(MIPSGICState *gic, uint32_t vp_index, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ switch (addr) {
+ case GIC_VP_CTL_OFS:
+ /* EIC isn't supported */
+ break;
+ case GIC_VP_RMASK_OFS:
+ gic->vps[vp_index].mask &= ~(data & GIC_VP_SET_RESET_MSK) &
+ GIC_VP_SET_RESET_MSK;
+ break;
+ case GIC_VP_SMASK_OFS:
+ gic->vps[vp_index].mask |= data & GIC_VP_SET_RESET_MSK;
+ break;
+ case GIC_VP_COMPARE_MAP_OFS:
+ /* EIC isn't supported */
+ OFFSET_CHECK((data & GIC_MAP_MSK) <= GIC_CPU_INT_MAX);
+ gic->vps[vp_index].compare_map = data & GIC_MAP_TO_PIN_REG_MSK;
+ break;
+ case GIC_VP_OTHER_ADDR_OFS:
+ OFFSET_CHECK(data < gic->num_vps);
+ gic->vps[vp_index].other_addr = data;
+ break;
+ case GIC_VP_COMPARE_LO_OFS:
+ gic_timer_store_vp_compare(gic, vp_index, data);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Write %d bytes at GIC offset LOCAL/OTHER "
+ "0x%" PRIx64" 0x%08" PRIx64 "\n", size, addr, data);
+ break;
+ }
+ return;
+bad_offset:
+ qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr);
+ return;
+}
+
+static void gic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size)
+{
+ int intr;
+ MIPSGICState *gic = (MIPSGICState *) opaque;
+ uint32_t vp_index = current_cpu->cpu_index;
+ int i, base, irq_src;
+ uint32_t other_index;
+
+ switch (addr) {
+ case GIC_SH_CONFIG_OFS:
+ {
+ uint32_t pre_cntstop = mips_gictimer_get_countstop(gic->gic_timer);
+ uint32_t new_cntstop = (data & GIC_SH_CONFIG_COUNTSTOP_MSK) >>
+ GIC_SH_CONFIG_COUNTSTOP_SHF;
+ if (pre_cntstop != new_cntstop) {
+ if (new_cntstop == 1) {
+ mips_gictimer_stop_count(gic->gic_timer);
+ } else {
+ mips_gictimer_start_count(gic->gic_timer);
+ }
+ }
+ }
+ break;
+ case GIC_SH_COUNTERLO_OFS:
+ if (mips_gictimer_get_countstop(gic->gic_timer)) {
+ mips_gictimer_store_sh_count(gic->gic_timer, data);
+ }
+ break;
+ case GIC_SH_RMASK_OFS ... GIC_SH_RMASK_LAST_OFS:
+        /* up to 64 bits per pin */
+ base = (addr - GIC_SH_RMASK_OFS) * 8;
+ OFFSET_CHECK((base + size * 8) <= gic->num_irq);
+ for (i = 0; i < size * 8; i++) {
+ gic->irq_state[base + i].enabled &= !((data >> i) & 1);
+ gic_update_pin_for_irq(gic, base + i);
+ }
+ break;
+ case GIC_SH_WEDGE_OFS:
+ /* Figure out which VP/HW Interrupt this maps to */
+ intr = data & ~GIC_SH_WEDGE_RW_MSK;
+ /* Mask/Enabled Checks */
+ OFFSET_CHECK(intr < gic->num_irq);
+ if (data & GIC_SH_WEDGE_RW_MSK) {
+ gic_set_irq(gic, intr, 1);
+ } else {
+ gic_set_irq(gic, intr, 0);
+ }
+ break;
+ case GIC_SH_SMASK_OFS ... GIC_SH_SMASK_LAST_OFS:
+        /* up to 64 bits per pin */
+ base = (addr - GIC_SH_SMASK_OFS) * 8;
+ OFFSET_CHECK((base + size * 8) <= gic->num_irq);
+ for (i = 0; i < size * 8; i++) {
+ gic->irq_state[base + i].enabled |= (data >> i) & 1;
+ gic_update_pin_for_irq(gic, base + i);
+ }
+ break;
+ case GIC_SH_MAP0_PIN_OFS ... GIC_SH_MAP255_PIN_OFS:
+        /* 32 bits per pin */
+ irq_src = (addr - GIC_SH_MAP0_PIN_OFS) / 4;
+ OFFSET_CHECK(irq_src < gic->num_irq);
+ /* EIC isn't supported */
+ OFFSET_CHECK((data & GIC_MAP_MSK) <= GIC_CPU_INT_MAX);
+ gic->irq_state[irq_src].map_pin = data & GIC_MAP_TO_PIN_REG_MSK;
+ break;
+ case GIC_SH_MAP0_VP_OFS ... GIC_SH_MAP255_VP_LAST_OFS:
+        /* up to 32 bytes per pin */
+ irq_src = (addr - GIC_SH_MAP0_VP_OFS) / 32;
+ OFFSET_CHECK(irq_src < gic->num_irq);
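+        /* The value is a one-hot VP mask; record the lowest set bit as the
+         * target VP, or -1 if no VP is selected. */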
+ data = data ? ctz64(data) : -1;
+ OFFSET_CHECK(data < gic->num_vps);
+ gic->irq_state[irq_src].map_vp = data;
+ break;
+ case VP_LOCAL_SECTION_OFS ... (VP_LOCAL_SECTION_OFS + GIC_VL_BRK_GROUP):
+ gic_write_vp(gic, vp_index, addr - VP_LOCAL_SECTION_OFS, data, size);
+ break;
+ case VP_OTHER_SECTION_OFS ... (VP_OTHER_SECTION_OFS + GIC_VL_BRK_GROUP):
+ other_index = gic->vps[vp_index].other_addr;
+ gic_write_vp(gic, other_index, addr - VP_OTHER_SECTION_OFS, data, size);
+ break;
+ case USM_VISIBLE_SECTION_OFS + GIC_USER_MODE_COUNTERLO:
+ case USM_VISIBLE_SECTION_OFS + GIC_USER_MODE_COUNTERHI:
+ /* do nothing. Read-only section */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Write %d bytes at GIC offset 0x%" PRIx64
+ " 0x%08" PRIx64 "\n", size, addr, data);
+ break;
+ }
+ return;
+bad_offset:
+ qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr);
+}
+
+static void gic_reset(void *opaque)
+{
+ int i;
+ MIPSGICState *gic = (MIPSGICState *) opaque;
+ int numintrs = (gic->num_irq / 8) - 1;
+
+    gic->sh_config = /* COUNTSTOP = 0; it is accessible via MIPSGICTimer */
+ /* CounterHi not implemented */
+ (0 << GIC_SH_CONFIG_COUNTBITS_SHF) |
+ (numintrs << GIC_SH_CONFIG_NUMINTRS_SHF) |
+ (gic->num_vps << GIC_SH_CONFIG_PVPS_SHF);
+ for (i = 0; i < gic->num_vps; i++) {
+ gic->vps[i].ctl = 0x0;
+ gic->vps[i].pend = 0x0;
+ /* PERFCNT, TIMER and WD not implemented */
+ gic->vps[i].mask = 0x32;
+ gic->vps[i].compare_map = GIC_MAP_TO_PIN_MSK;
+ mips_gictimer_store_vp_compare(gic->gic_timer, i, 0xffffffff);
+ gic->vps[i].other_addr = 0x0;
+ }
+ for (i = 0; i < gic->num_irq; i++) {
+ gic->irq_state[i].enabled = 0;
+ gic->irq_state[i].pending = 0;
+ gic->irq_state[i].map_pin = GIC_MAP_TO_PIN_MSK;
+ gic->irq_state[i].map_vp = -1;
+ }
+ mips_gictimer_store_sh_count(gic->gic_timer, 0);
+ /* COUNTSTOP = 0 */
+ mips_gictimer_start_count(gic->gic_timer);
+}
+
+static const MemoryRegionOps gic_ops = {
+ .read = gic_read,
+ .write = gic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .max_access_size = 8,
+ },
+};
+
+static void mips_gic_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ MIPSGICState *s = MIPS_GIC(obj);
+
+ memory_region_init_io(&s->mr, OBJECT(s), &gic_ops, s,
+ "mips-gic", GIC_ADDRSPACE_SZ);
+ sysbus_init_mmio(sbd, &s->mr);
+ qemu_register_reset(gic_reset, s);
+}
+
+static void mips_gic_realize(DeviceState *dev, Error **errp)
+{
+ MIPSGICState *s = MIPS_GIC(dev);
+ CPUState *cs = first_cpu;
+ int i;
+
+ if (s->num_vps > GIC_MAX_VPS) {
+ error_setg(errp, "Exceeded maximum CPUs %d", s->num_vps);
+ return;
+ }
+ if ((s->num_irq > GIC_MAX_INTRS) || (s->num_irq % 8) || (s->num_irq <= 0)) {
+ error_setg(errp, "GIC supports up to %d external interrupts in "
+                   "multiples of 8: %d", GIC_MAX_INTRS, s->num_irq);
+ return;
+ }
+ s->vps = g_new(MIPSGICVPState, s->num_vps);
+ s->irq_state = g_new(MIPSGICIRQState, s->num_irq);
+ /* Register the env for all VPs with the GIC */
+ for (i = 0; i < s->num_vps; i++) {
+ if (cs != NULL) {
+ s->vps[i].env = cs->env_ptr;
+ cs = CPU_NEXT(cs);
+ } else {
+ error_setg(errp,
+ "Unable to initialize GIC, CPUState for CPU#%d not valid.", i);
+ return;
+ }
+ }
+ s->gic_timer = mips_gictimer_init(s, s->num_vps, gic_timer_expire_cb);
+ qdev_init_gpio_in(dev, gic_set_irq, s->num_irq);
+ for (i = 0; i < s->num_irq; i++) {
+ s->irq_state[i].irq = qdev_get_gpio_in(dev, i);
+ }
+}
+
+static Property mips_gic_properties[] = {
+ DEFINE_PROP_INT32("num-vp", MIPSGICState, num_vps, 1),
+ DEFINE_PROP_INT32("num-irq", MIPSGICState, num_irq, 256),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void mips_gic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, mips_gic_properties);
+ dc->realize = mips_gic_realize;
+}
+
+static const TypeInfo mips_gic_info = {
+ .name = TYPE_MIPS_GIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MIPSGICState),
+ .instance_init = mips_gic_init,
+ .class_init = mips_gic_class_init,
+};
+
+static void mips_gic_register_types(void)
+{
+ type_register_static(&mips_gic_info);
+}
+
+type_init(mips_gic_register_types)
diff --git a/hw/intc/omap_intc.c b/hw/intc/omap_intc.c
new file mode 100644
index 000000000..d7183d035
--- /dev/null
+++ b/hw/intc/omap_intc.c
@@ -0,0 +1,690 @@
+/*
+ * TI OMAP interrupt controller emulation.
+ *
+ * Copyright (C) 2006-2008 Andrzej Zaborowski <balrog@zabor.org>
+ * Copyright (C) 2007-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/arm/omap.h"
+#include "hw/sysbus.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+
+/* Interrupt Handlers */
+struct omap_intr_handler_bank_s {
+ uint32_t irqs;
+ uint32_t inputs;
+ uint32_t mask;
+ uint32_t fiq;
+ uint32_t sens_edge;
+ uint32_t swi;
+ unsigned char priority[32];
+};
+
+struct omap_intr_handler_s {
+ SysBusDevice parent_obj;
+
+ qemu_irq *pins;
+ qemu_irq parent_intr[2];
+ MemoryRegion mmio;
+ void *iclk;
+ void *fclk;
+ unsigned char nbanks;
+ int level_only;
+ uint32_t size;
+
+ uint8_t revision;
+
+ /* state */
+ uint32_t new_agr[2];
+ int sir_intr[2];
+ int autoidle;
+ uint32_t mask;
+ struct omap_intr_handler_bank_s bank[3];
+};
+
+static void omap_inth_sir_update(struct omap_intr_handler_s *s, int is_fiq)
+{
+ int i, j, sir_intr, p_intr, p;
+ uint32_t level;
+ sir_intr = 0;
+ p_intr = 255;
+
+ /* Find the interrupt line with the highest dynamic priority.
+     * Note: 0 denotes the highest priority.
+ * If all interrupts have the same priority, the default order is IRQ_N,
+ * IRQ_N-1,...,IRQ_0. */
+ for (j = 0; j < s->nbanks; ++j) {
+ level = s->bank[j].irqs & ~s->bank[j].mask &
+ (is_fiq ? s->bank[j].fiq : ~s->bank[j].fiq);
+
+ while (level != 0) {
+ i = ctz32(level);
+ p = s->bank[j].priority[i];
+ if (p <= p_intr) {
+ p_intr = p;
+ sir_intr = 32 * j + i;
+ }
+ level &= level - 1;
+ }
+ }
+ s->sir_intr[is_fiq] = sir_intr;
+}
+
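+/*
+ * Assert the IRQ or FIQ output if any unmasked interrupt is pending, the
+ * global mask allows it and a new assertion has been armed (at reset or via
+ * the NEWIRQAGR/NEWFIQAGR control bits).
+ */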
+static inline void omap_inth_update(struct omap_intr_handler_s *s, int is_fiq)
+{
+ int i;
+ uint32_t has_intr = 0;
+
+ for (i = 0; i < s->nbanks; ++i)
+ has_intr |= s->bank[i].irqs & ~s->bank[i].mask &
+ (is_fiq ? s->bank[i].fiq : ~s->bank[i].fiq);
+
+ if (s->new_agr[is_fiq] & has_intr & s->mask) {
+ s->new_agr[is_fiq] = 0;
+ omap_inth_sir_update(s, is_fiq);
+ qemu_set_irq(s->parent_intr[is_fiq], 1);
+ }
+}
+
+#define INT_FALLING_EDGE 0
+#define INT_LOW_LEVEL 1
+
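+/*
+ * Edge-sensitive lines latch on an input transition and stay pending until
+ * explicitly cleared; level-sensitive lines simply follow the input.
+ */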
+static void omap_set_intr(void *opaque, int irq, int req)
+{
+ struct omap_intr_handler_s *ih = (struct omap_intr_handler_s *) opaque;
+ uint32_t rise;
+
+ struct omap_intr_handler_bank_s *bank = &ih->bank[irq >> 5];
+ int n = irq & 31;
+
+ if (req) {
+ rise = ~bank->irqs & (1 << n);
+ if (~bank->sens_edge & (1 << n))
+ rise &= ~bank->inputs;
+
+ bank->inputs |= (1 << n);
+ if (rise) {
+ bank->irqs |= rise;
+ omap_inth_update(ih, 0);
+ omap_inth_update(ih, 1);
+ }
+ } else {
+ rise = bank->sens_edge & bank->irqs & (1 << n);
+ bank->irqs &= ~rise;
+ bank->inputs &= ~(1 << n);
+ }
+}
+
+/* Simplified version with no edge detection */
+static void omap_set_intr_noedge(void *opaque, int irq, int req)
+{
+ struct omap_intr_handler_s *ih = (struct omap_intr_handler_s *) opaque;
+ uint32_t rise;
+
+ struct omap_intr_handler_bank_s *bank = &ih->bank[irq >> 5];
+ int n = irq & 31;
+
+ if (req) {
+ rise = ~bank->inputs & (1 << n);
+ if (rise) {
+ bank->irqs |= bank->inputs |= rise;
+ omap_inth_update(ih, 0);
+ omap_inth_update(ih, 1);
+ }
+ } else
+ bank->irqs = (bank->inputs &= ~(1 << n)) | bank->swi;
+}
+
+static uint64_t omap_inth_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque;
+ int i, offset = addr;
+ int bank_no = offset >> 8;
+ int line_no;
+ struct omap_intr_handler_bank_s *bank = &s->bank[bank_no];
+ offset &= 0xff;
+
+ switch (offset) {
+ case 0x00: /* ITR */
+ return bank->irqs;
+
+ case 0x04: /* MIR */
+ return bank->mask;
+
+ case 0x10: /* SIR_IRQ_CODE */
+ case 0x14: /* SIR_FIQ_CODE */
+ if (bank_no != 0)
+ break;
+ line_no = s->sir_intr[(offset - 0x10) >> 2];
+ bank = &s->bank[line_no >> 5];
+ i = line_no & 31;
+ if (((bank->sens_edge >> i) & 1) == INT_FALLING_EDGE)
+ bank->irqs &= ~(1 << i);
+ return line_no;
+
+ case 0x18: /* CONTROL_REG */
+ if (bank_no != 0)
+ break;
+ return 0;
+
+ case 0x1c: /* ILR0 */
+ case 0x20: /* ILR1 */
+ case 0x24: /* ILR2 */
+ case 0x28: /* ILR3 */
+ case 0x2c: /* ILR4 */
+ case 0x30: /* ILR5 */
+ case 0x34: /* ILR6 */
+ case 0x38: /* ILR7 */
+ case 0x3c: /* ILR8 */
+ case 0x40: /* ILR9 */
+ case 0x44: /* ILR10 */
+ case 0x48: /* ILR11 */
+ case 0x4c: /* ILR12 */
+ case 0x50: /* ILR13 */
+ case 0x54: /* ILR14 */
+ case 0x58: /* ILR15 */
+ case 0x5c: /* ILR16 */
+ case 0x60: /* ILR17 */
+ case 0x64: /* ILR18 */
+ case 0x68: /* ILR19 */
+ case 0x6c: /* ILR20 */
+ case 0x70: /* ILR21 */
+ case 0x74: /* ILR22 */
+ case 0x78: /* ILR23 */
+ case 0x7c: /* ILR24 */
+ case 0x80: /* ILR25 */
+ case 0x84: /* ILR26 */
+ case 0x88: /* ILR27 */
+ case 0x8c: /* ILR28 */
+ case 0x90: /* ILR29 */
+ case 0x94: /* ILR30 */
+ case 0x98: /* ILR31 */
+ i = (offset - 0x1c) >> 2;
+ return (bank->priority[i] << 2) |
+ (((bank->sens_edge >> i) & 1) << 1) |
+ ((bank->fiq >> i) & 1);
+
+ case 0x9c: /* ISR */
+ return 0x00000000;
+
+ }
+ OMAP_BAD_REG(addr);
+ return 0;
+}
+
+static void omap_inth_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque;
+ int i, offset = addr;
+ int bank_no = offset >> 8;
+ struct omap_intr_handler_bank_s *bank = &s->bank[bank_no];
+ offset &= 0xff;
+
+ switch (offset) {
+ case 0x00: /* ITR */
+ /* Important: ignore the clearing if the IRQ is level-triggered and
+ the input bit is 1 */
+ bank->irqs &= value | (bank->inputs & bank->sens_edge);
+ return;
+
+ case 0x04: /* MIR */
+ bank->mask = value;
+ omap_inth_update(s, 0);
+ omap_inth_update(s, 1);
+ return;
+
+ case 0x10: /* SIR_IRQ_CODE */
+ case 0x14: /* SIR_FIQ_CODE */
+ OMAP_RO_REG(addr);
+ break;
+
+ case 0x18: /* CONTROL_REG */
+ if (bank_no != 0)
+ break;
+ if (value & 2) {
+ qemu_set_irq(s->parent_intr[1], 0);
+ s->new_agr[1] = ~0;
+ omap_inth_update(s, 1);
+ }
+ if (value & 1) {
+ qemu_set_irq(s->parent_intr[0], 0);
+ s->new_agr[0] = ~0;
+ omap_inth_update(s, 0);
+ }
+ return;
+
+ case 0x1c: /* ILR0 */
+ case 0x20: /* ILR1 */
+ case 0x24: /* ILR2 */
+ case 0x28: /* ILR3 */
+ case 0x2c: /* ILR4 */
+ case 0x30: /* ILR5 */
+ case 0x34: /* ILR6 */
+ case 0x38: /* ILR7 */
+ case 0x3c: /* ILR8 */
+ case 0x40: /* ILR9 */
+ case 0x44: /* ILR10 */
+ case 0x48: /* ILR11 */
+ case 0x4c: /* ILR12 */
+ case 0x50: /* ILR13 */
+ case 0x54: /* ILR14 */
+ case 0x58: /* ILR15 */
+ case 0x5c: /* ILR16 */
+ case 0x60: /* ILR17 */
+ case 0x64: /* ILR18 */
+ case 0x68: /* ILR19 */
+ case 0x6c: /* ILR20 */
+ case 0x70: /* ILR21 */
+ case 0x74: /* ILR22 */
+ case 0x78: /* ILR23 */
+ case 0x7c: /* ILR24 */
+ case 0x80: /* ILR25 */
+ case 0x84: /* ILR26 */
+ case 0x88: /* ILR27 */
+ case 0x8c: /* ILR28 */
+ case 0x90: /* ILR29 */
+ case 0x94: /* ILR30 */
+ case 0x98: /* ILR31 */
+ i = (offset - 0x1c) >> 2;
+ bank->priority[i] = (value >> 2) & 0x1f;
+ bank->sens_edge &= ~(1 << i);
+ bank->sens_edge |= ((value >> 1) & 1) << i;
+ bank->fiq &= ~(1 << i);
+ bank->fiq |= (value & 1) << i;
+ return;
+
+ case 0x9c: /* ISR */
+ for (i = 0; i < 32; i ++)
+ if (value & (1 << i)) {
+ omap_set_intr(s, 32 * bank_no + i, 1);
+ return;
+ }
+ return;
+ }
+ OMAP_BAD_REG(addr);
+}
+
+static const MemoryRegionOps omap_inth_mem_ops = {
+ .read = omap_inth_read,
+ .write = omap_inth_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void omap_inth_reset(DeviceState *dev)
+{
+ struct omap_intr_handler_s *s = OMAP_INTC(dev);
+ int i;
+
+ for (i = 0; i < s->nbanks; ++i){
+ s->bank[i].irqs = 0x00000000;
+ s->bank[i].mask = 0xffffffff;
+ s->bank[i].sens_edge = 0x00000000;
+ s->bank[i].fiq = 0x00000000;
+ s->bank[i].inputs = 0x00000000;
+ s->bank[i].swi = 0x00000000;
+ memset(s->bank[i].priority, 0, sizeof(s->bank[i].priority));
+
+ if (s->level_only)
+ s->bank[i].sens_edge = 0xffffffff;
+ }
+
+ s->new_agr[0] = ~0;
+ s->new_agr[1] = ~0;
+ s->sir_intr[0] = 0;
+ s->sir_intr[1] = 0;
+ s->autoidle = 0;
+ s->mask = ~0;
+
+ qemu_set_irq(s->parent_intr[0], 0);
+ qemu_set_irq(s->parent_intr[1], 0);
+}
+
+static void omap_intc_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ struct omap_intr_handler_s *s = OMAP_INTC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ s->nbanks = 1;
+ sysbus_init_irq(sbd, &s->parent_intr[0]);
+ sysbus_init_irq(sbd, &s->parent_intr[1]);
+ qdev_init_gpio_in(dev, omap_set_intr, s->nbanks * 32);
+ memory_region_init_io(&s->mmio, obj, &omap_inth_mem_ops, s,
+ "omap-intc", s->size);
+ sysbus_init_mmio(sbd, &s->mmio);
+}
+
+static void omap_intc_realize(DeviceState *dev, Error **errp)
+{
+ struct omap_intr_handler_s *s = OMAP_INTC(dev);
+
+ if (!s->iclk) {
+ error_setg(errp, "omap-intc: clk not connected");
+ }
+}
+
+void omap_intc_set_iclk(omap_intr_handler *intc, omap_clk clk)
+{
+ intc->iclk = clk;
+}
+
+void omap_intc_set_fclk(omap_intr_handler *intc, omap_clk clk)
+{
+ intc->fclk = clk;
+}
+
+static Property omap_intc_properties[] = {
+ DEFINE_PROP_UINT32("size", struct omap_intr_handler_s, size, 0x100),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void omap_intc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = omap_inth_reset;
+ device_class_set_props(dc, omap_intc_properties);
+ /* Reason: pointer property "clk" */
+ dc->user_creatable = false;
+ dc->realize = omap_intc_realize;
+}
+
+static const TypeInfo omap_intc_info = {
+ .name = "omap-intc",
+ .parent = TYPE_OMAP_INTC,
+ .instance_init = omap_intc_init,
+ .class_init = omap_intc_class_init,
+};
+
+static uint64_t omap2_inth_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque;
+ int offset = addr;
+ int bank_no, line_no;
+ struct omap_intr_handler_bank_s *bank = NULL;
+
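+    /* Registers 0x80..0x9f are banked at a 0x20 stride; fold the bank number
+     * out of the offset so the switch below sees bank-relative offsets. */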
+ if ((offset & 0xf80) == 0x80) {
+ bank_no = (offset & 0x60) >> 5;
+ if (bank_no < s->nbanks) {
+ offset &= ~0x60;
+ bank = &s->bank[bank_no];
+ } else {
+ OMAP_BAD_REG(addr);
+ return 0;
+ }
+ }
+
+ switch (offset) {
+ case 0x00: /* INTC_REVISION */
+ return s->revision;
+
+ case 0x10: /* INTC_SYSCONFIG */
+ return (s->autoidle >> 2) & 1;
+
+ case 0x14: /* INTC_SYSSTATUS */
+ return 1; /* RESETDONE */
+
+ case 0x40: /* INTC_SIR_IRQ */
+ return s->sir_intr[0];
+
+ case 0x44: /* INTC_SIR_FIQ */
+ return s->sir_intr[1];
+
+ case 0x48: /* INTC_CONTROL */
+ return (!s->mask) << 2; /* GLOBALMASK */
+
+ case 0x4c: /* INTC_PROTECTION */
+ return 0;
+
+ case 0x50: /* INTC_IDLE */
+ return s->autoidle & 3;
+
+ /* Per-bank registers */
+ case 0x80: /* INTC_ITR */
+ return bank->inputs;
+
+ case 0x84: /* INTC_MIR */
+ return bank->mask;
+
+ case 0x88: /* INTC_MIR_CLEAR */
+ case 0x8c: /* INTC_MIR_SET */
+ return 0;
+
+ case 0x90: /* INTC_ISR_SET */
+ return bank->swi;
+
+ case 0x94: /* INTC_ISR_CLEAR */
+ return 0;
+
+ case 0x98: /* INTC_PENDING_IRQ */
+ return bank->irqs & ~bank->mask & ~bank->fiq;
+
+ case 0x9c: /* INTC_PENDING_FIQ */
+ return bank->irqs & ~bank->mask & bank->fiq;
+
+ /* Per-line registers */
+ case 0x100 ... 0x300: /* INTC_ILR */
+ bank_no = (offset - 0x100) >> 7;
+ if (bank_no > s->nbanks)
+ break;
+ bank = &s->bank[bank_no];
+ line_no = (offset & 0x7f) >> 2;
+ return (bank->priority[line_no] << 2) |
+ ((bank->fiq >> line_no) & 1);
+ }
+ OMAP_BAD_REG(addr);
+ return 0;
+}
+
+static void omap2_inth_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque;
+ int offset = addr;
+ int bank_no, line_no;
+ struct omap_intr_handler_bank_s *bank = NULL;
+
+ if ((offset & 0xf80) == 0x80) {
+ bank_no = (offset & 0x60) >> 5;
+ if (bank_no < s->nbanks) {
+ offset &= ~0x60;
+ bank = &s->bank[bank_no];
+ } else {
+ OMAP_BAD_REG(addr);
+ return;
+ }
+ }
+
+ switch (offset) {
+ case 0x10: /* INTC_SYSCONFIG */
+ s->autoidle &= 4;
+ s->autoidle |= (value & 1) << 2;
+ if (value & 2) { /* SOFTRESET */
+ omap_inth_reset(DEVICE(s));
+ }
+ return;
+
+ case 0x48: /* INTC_CONTROL */
+ s->mask = (value & 4) ? 0 : ~0; /* GLOBALMASK */
+ if (value & 2) { /* NEWFIQAGR */
+ qemu_set_irq(s->parent_intr[1], 0);
+ s->new_agr[1] = ~0;
+ omap_inth_update(s, 1);
+ }
+ if (value & 1) { /* NEWIRQAGR */
+ qemu_set_irq(s->parent_intr[0], 0);
+ s->new_agr[0] = ~0;
+ omap_inth_update(s, 0);
+ }
+ return;
+
+ case 0x4c: /* INTC_PROTECTION */
+ /* TODO: Make a bitmap (or sizeof(char)map) of access privileges
+ * for every register, see Chapter 3 and 4 for privileged mode. */
+ if (value & 1)
+ fprintf(stderr, "%s: protection mode enable attempt\n",
+ __func__);
+ return;
+
+ case 0x50: /* INTC_IDLE */
+ s->autoidle &= ~3;
+ s->autoidle |= value & 3;
+ return;
+
+ /* Per-bank registers */
+ case 0x84: /* INTC_MIR */
+ bank->mask = value;
+ omap_inth_update(s, 0);
+ omap_inth_update(s, 1);
+ return;
+
+ case 0x88: /* INTC_MIR_CLEAR */
+ bank->mask &= ~value;
+ omap_inth_update(s, 0);
+ omap_inth_update(s, 1);
+ return;
+
+ case 0x8c: /* INTC_MIR_SET */
+ bank->mask |= value;
+ return;
+
+ case 0x90: /* INTC_ISR_SET */
+ bank->irqs |= bank->swi |= value;
+ omap_inth_update(s, 0);
+ omap_inth_update(s, 1);
+ return;
+
+ case 0x94: /* INTC_ISR_CLEAR */
+ bank->swi &= ~value;
+ bank->irqs = bank->swi & bank->inputs;
+ return;
+
+ /* Per-line registers */
+ case 0x100 ... 0x300: /* INTC_ILR */
+ bank_no = (offset - 0x100) >> 7;
+ if (bank_no > s->nbanks)
+ break;
+ bank = &s->bank[bank_no];
+ line_no = (offset & 0x7f) >> 2;
+ bank->priority[line_no] = (value >> 2) & 0x3f;
+ bank->fiq &= ~(1 << line_no);
+ bank->fiq |= (value & 1) << line_no;
+ return;
+
+ case 0x00: /* INTC_REVISION */
+ case 0x14: /* INTC_SYSSTATUS */
+ case 0x40: /* INTC_SIR_IRQ */
+ case 0x44: /* INTC_SIR_FIQ */
+ case 0x80: /* INTC_ITR */
+ case 0x98: /* INTC_PENDING_IRQ */
+ case 0x9c: /* INTC_PENDING_FIQ */
+ OMAP_RO_REG(addr);
+ return;
+ }
+ OMAP_BAD_REG(addr);
+}
+
+static const MemoryRegionOps omap2_inth_mem_ops = {
+ .read = omap2_inth_read,
+ .write = omap2_inth_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void omap2_intc_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ struct omap_intr_handler_s *s = OMAP_INTC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ s->level_only = 1;
+ s->nbanks = 3;
+ sysbus_init_irq(sbd, &s->parent_intr[0]);
+ sysbus_init_irq(sbd, &s->parent_intr[1]);
+ qdev_init_gpio_in(dev, omap_set_intr_noedge, s->nbanks * 32);
+ memory_region_init_io(&s->mmio, obj, &omap2_inth_mem_ops, s,
+ "omap2-intc", 0x1000);
+ sysbus_init_mmio(sbd, &s->mmio);
+}
+
+static void omap2_intc_realize(DeviceState *dev, Error **errp)
+{
+ struct omap_intr_handler_s *s = OMAP_INTC(dev);
+
+ if (!s->iclk) {
+ error_setg(errp, "omap2-intc: iclk not connected");
+ return;
+ }
+ if (!s->fclk) {
+ error_setg(errp, "omap2-intc: fclk not connected");
+ return;
+ }
+}
+
+static Property omap2_intc_properties[] = {
+ DEFINE_PROP_UINT8("revision", struct omap_intr_handler_s,
+ revision, 0x21),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void omap2_intc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = omap_inth_reset;
+ device_class_set_props(dc, omap2_intc_properties);
+ /* Reason: pointer property "iclk", "fclk" */
+ dc->user_creatable = false;
+ dc->realize = omap2_intc_realize;
+}
+
+static const TypeInfo omap2_intc_info = {
+ .name = "omap2-intc",
+ .parent = TYPE_OMAP_INTC,
+ .instance_init = omap2_intc_init,
+ .class_init = omap2_intc_class_init,
+};
+
+static const TypeInfo omap_intc_type_info = {
+ .name = TYPE_OMAP_INTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(omap_intr_handler),
+ .abstract = true,
+};
+
+static void omap_intc_register_types(void)
+{
+ type_register_static(&omap_intc_type_info);
+ type_register_static(&omap_intc_info);
+ type_register_static(&omap2_intc_info);
+}
+
+type_init(omap_intc_register_types)
diff --git a/hw/intc/ompic.c b/hw/intc/ompic.c
new file mode 100644
index 000000000..1f1031480
--- /dev/null
+++ b/hw/intc/ompic.c
@@ -0,0 +1,181 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Stafford Horne <shorne@gmail.com>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "exec/memory.h"
+#include "qom/object.h"
+
+#define TYPE_OR1K_OMPIC "or1k-ompic"
+OBJECT_DECLARE_SIMPLE_TYPE(OR1KOMPICState, OR1K_OMPIC)
+
+#define OMPIC_CTRL_IRQ_ACK (1 << 31)
+#define OMPIC_CTRL_IRQ_GEN (1 << 30)
+#define OMPIC_CTRL_DST(cpu) (((cpu) >> 16) & 0x3fff)
+
+#define OMPIC_REG(addr) (((addr) >> 2) & 0x1)
+#define OMPIC_SRC_CPU(addr) (((addr) >> 3) & 0x4f)
+#define OMPIC_DST_CPU(addr) (((addr) >> 3) & 0x4f)
+
+#define OMPIC_STATUS_IRQ_PENDING (1 << 30)
+#define OMPIC_STATUS_SRC(cpu) (((cpu) & 0x3fff) << 16)
+#define OMPIC_STATUS_DATA(data) ((data) & 0xffff)
+
+#define OMPIC_CONTROL 0
+#define OMPIC_STATUS 1
+
+#define OMPIC_MAX_CPUS 4 /* Real max is much higher, but don't waste memory */
+#define OMPIC_ADDRSPACE_SZ (OMPIC_MAX_CPUS * 2 * 4) /* 2 32-bit regs per cpu */
+
+typedef struct OR1KOMPICCPUState OR1KOMPICCPUState;
+
+struct OR1KOMPICCPUState {
+ qemu_irq irq;
+ uint32_t status;
+ uint32_t control;
+};
+
+struct OR1KOMPICState {
+ SysBusDevice parent_obj;
+ MemoryRegion mr;
+
+ OR1KOMPICCPUState cpus[OMPIC_MAX_CPUS];
+
+ uint32_t num_cpus;
+};
+
+static uint64_t ompic_read(void *opaque, hwaddr addr, unsigned size)
+{
+ OR1KOMPICState *s = opaque;
+ int src_cpu = OMPIC_SRC_CPU(addr);
+
+    /* Only control is writable; a control write also updates status */
+ if (OMPIC_REG(addr) == OMPIC_CONTROL) {
+ return s->cpus[src_cpu].control;
+ } else {
+ return s->cpus[src_cpu].status;
+ }
+
+}
+
+static void ompic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size)
+{
+ OR1KOMPICState *s = opaque;
+    /* Only control is writable; a control write also updates status */
+ if (OMPIC_REG(addr) == OMPIC_CONTROL) {
+ int src_cpu = OMPIC_SRC_CPU(addr);
+
+ s->cpus[src_cpu].control = data;
+
+ if (data & OMPIC_CTRL_IRQ_GEN) {
+ int dst_cpu = OMPIC_CTRL_DST(data);
+
+ s->cpus[dst_cpu].status = OMPIC_STATUS_IRQ_PENDING |
+ OMPIC_STATUS_SRC(src_cpu) |
+ OMPIC_STATUS_DATA(data);
+
+ qemu_irq_raise(s->cpus[dst_cpu].irq);
+ }
+ if (data & OMPIC_CTRL_IRQ_ACK) {
+ s->cpus[src_cpu].status &= ~OMPIC_STATUS_IRQ_PENDING;
+ qemu_irq_lower(s->cpus[src_cpu].irq);
+ }
+ }
+}
+
+static const MemoryRegionOps ompic_ops = {
+ .read = ompic_read,
+ .write = ompic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .max_access_size = 8,
+ },
+};
+
+static void or1k_ompic_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ OR1KOMPICState *s = OR1K_OMPIC(obj);
+
+ memory_region_init_io(&s->mr, OBJECT(s), &ompic_ops, s,
+ "or1k-ompic", OMPIC_ADDRSPACE_SZ);
+ sysbus_init_mmio(sbd, &s->mr);
+}
+
+static void or1k_ompic_realize(DeviceState *dev, Error **errp)
+{
+ OR1KOMPICState *s = OR1K_OMPIC(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ int i;
+
+ if (s->num_cpus > OMPIC_MAX_CPUS) {
+ error_setg(errp, "Exceeded maximum CPUs %d", s->num_cpus);
+ return;
+ }
+ /* Init IRQ sources for all CPUs */
+ for (i = 0; i < s->num_cpus; i++) {
+ sysbus_init_irq(sbd, &s->cpus[i].irq);
+ }
+}
+
+static Property or1k_ompic_properties[] = {
+ DEFINE_PROP_UINT32("num-cpus", OR1KOMPICState, num_cpus, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vmstate_or1k_ompic_cpu = {
+ .name = "or1k_ompic_cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(status, OR1KOMPICCPUState),
+ VMSTATE_UINT32(control, OR1KOMPICCPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_or1k_ompic = {
+ .name = TYPE_OR1K_OMPIC,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(cpus, OR1KOMPICState, OMPIC_MAX_CPUS, 1,
+ vmstate_or1k_ompic_cpu, OR1KOMPICCPUState),
+ VMSTATE_UINT32(num_cpus, OR1KOMPICState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void or1k_ompic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, or1k_ompic_properties);
+ dc->realize = or1k_ompic_realize;
+ dc->vmsd = &vmstate_or1k_ompic;
+}
+
+static const TypeInfo or1k_ompic_info = {
+ .name = TYPE_OR1K_OMPIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(OR1KOMPICState),
+ .instance_init = or1k_ompic_init,
+ .class_init = or1k_ompic_class_init,
+};
+
+static void or1k_ompic_register_types(void)
+{
+ type_register_static(&or1k_ompic_info);
+}
+
+type_init(or1k_ompic_register_types)
diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c
new file mode 100644
index 000000000..49504e740
--- /dev/null
+++ b/hw/intc/openpic.c
@@ -0,0 +1,1645 @@
+/*
+ * OpenPIC emulation
+ *
+ * Copyright (c) 2004 Jocelyn Mayer
+ * 2011 Alexander Graf
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+/*
+ *
+ * Based on OpenPic implementations:
+ * - Motorola MPC8245 & MPC8540 user manuals.
+ * - Motorola Harrier programmer manual
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/ppc/mac.h"
+#include "hw/pci/pci.h"
+#include "hw/ppc/openpic.h"
+#include "hw/ppc/ppc_e500.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/pci/msi.h"
+#include "qapi/error.h"
+#include "qemu/bitops.h"
+#include "qapi/qmp/qerror.h"
+#include "qemu/module.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+
+/* #define DEBUG_OPENPIC */
+
+#ifdef DEBUG_OPENPIC
+static const int debug_openpic = 1;
+#else
+static const int debug_openpic = 0;
+#endif
+
+static int get_current_cpu(void);
+#define DPRINTF(fmt, ...) do { \
+ if (debug_openpic) { \
+ info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+/* OpenPIC capability flags */
+#define OPENPIC_FLAG_IDR_CRIT (1 << 0)
+#define OPENPIC_FLAG_ILR (2 << 0)
+
+/* OpenPIC address map */
+#define OPENPIC_GLB_REG_START 0x0
+#define OPENPIC_GLB_REG_SIZE 0x10F0
+#define OPENPIC_TMR_REG_START 0x10F0
+#define OPENPIC_TMR_REG_SIZE 0x220
+#define OPENPIC_MSI_REG_START 0x1600
+#define OPENPIC_MSI_REG_SIZE 0x200
+#define OPENPIC_SUMMARY_REG_START 0x3800
+#define OPENPIC_SUMMARY_REG_SIZE 0x800
+#define OPENPIC_SRC_REG_START 0x10000
+#define OPENPIC_SRC_REG_SIZE (OPENPIC_MAX_SRC * 0x20)
+#define OPENPIC_CPU_REG_START 0x20000
+#define OPENPIC_CPU_REG_SIZE (0x100 + ((MAX_CPU - 1) * 0x1000))
+
+static FslMpicInfo fsl_mpic_20 = {
+ .max_ext = 12,
+};
+
+static FslMpicInfo fsl_mpic_42 = {
+ .max_ext = 12,
+};
+
+#define FRR_NIRQ_SHIFT 16
+#define FRR_NCPU_SHIFT 8
+#define FRR_VID_SHIFT 0
+
+#define VID_REVISION_1_2 2
+#define VID_REVISION_1_3 3
+
+#define VIR_GENERIC 0x00000000 /* Generic Vendor ID */
+#define VIR_MPIC2A 0x00004614 /* IBM MPIC-2A */
+
+#define GCR_RESET 0x80000000
+#define GCR_MODE_PASS 0x00000000
+#define GCR_MODE_MIXED 0x20000000
+#define GCR_MODE_PROXY 0x60000000
+
+#define TBCR_CI 0x80000000 /* count inhibit */
+#define TCCR_TOG 0x80000000 /* toggles when decrement to zero */
+
+#define IDR_EP_SHIFT 31
+#define IDR_EP_MASK (1U << IDR_EP_SHIFT)
+#define IDR_CI0_SHIFT 30
+#define IDR_CI1_SHIFT 29
+#define IDR_P1_SHIFT 1
+#define IDR_P0_SHIFT 0
+
+#define ILR_INTTGT_MASK 0x000000ff
+#define ILR_INTTGT_INT 0x00
+#define ILR_INTTGT_CINT 0x01 /* critical */
+#define ILR_INTTGT_MCP 0x02 /* machine check */
+
+/*
+ * The currently supported INTTGT values happen to be the same as QEMU's
+ * openpic output codes, but don't depend on this. The output codes
+ * could change (unlikely, but...) or support could be added for
+ * more INTTGT values.
+ */
+static const int inttgt_output[][2] = {
+ { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT },
+ { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT },
+ { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK },
+};
+
+static int inttgt_to_output(int inttgt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
+ if (inttgt_output[i][0] == inttgt) {
+ return inttgt_output[i][1];
+ }
+ }
+
+ error_report("%s: unsupported inttgt %d", __func__, inttgt);
+ return OPENPIC_OUTPUT_INT;
+}
+
+static int output_to_inttgt(int output)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
+ if (inttgt_output[i][1] == output) {
+ return inttgt_output[i][0];
+ }
+ }
+
+ abort();
+}
+
+#define MSIIR_OFFSET 0x140
+#define MSIIR_SRS_SHIFT 29
+#define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT)
+#define MSIIR_IBS_SHIFT 24
+#define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT)
+
+static int get_current_cpu(void)
+{
+ if (!current_cpu) {
+ return -1;
+ }
+
+ return current_cpu->cpu_index;
+}
+
+static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
+ int idx);
+static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
+ uint32_t val, int idx);
+static void openpic_reset(DeviceState *d);
+
+/*
+ * Convert between openpic clock ticks and nanosecs. In the hardware the clock
+ * frequency is driven by board inputs to the PIC which the PIC would then
+ * divide by 4 or 8. For now, hard-code it to 25MHz.
+ */
+#define OPENPIC_TIMER_FREQ_MHZ 25
+#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)
+static inline uint64_t ns_to_ticks(uint64_t ns)
+{
+ return ns / OPENPIC_TIMER_NS_PER_TICK;
+}
+static inline uint64_t ticks_to_ns(uint64_t ticks)
+{
+ return ticks * OPENPIC_TIMER_NS_PER_TICK;
+}
+
+static inline void IRQ_setbit(IRQQueue *q, int n_IRQ)
+{
+ set_bit(n_IRQ, q->queue);
+}
+
+static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ)
+{
+ clear_bit(n_IRQ, q->queue);
+}
+
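+/*
+ * Scan the queue for the raised IRQ with the highest IVPR priority and cache
+ * it in q->next / q->priority (or -1 if the queue is empty).
+ */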
+static void IRQ_check(OpenPICState *opp, IRQQueue *q)
+{
+ int irq = -1;
+ int next = -1;
+ int priority = -1;
+
+ for (;;) {
+ irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
+ if (irq == opp->max_irq) {
+ break;
+ }
+
+ DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
+ irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);
+
+ if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
+ next = irq;
+ priority = IVPR_PRIORITY(opp->src[irq].ivpr);
+ }
+ }
+
+ q->next = next;
+ q->priority = priority;
+}
+
+static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
+{
+ /* XXX: optimize */
+ IRQ_check(opp, q);
+
+ return q->next;
+}
+
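+/*
+ * Propagate one IRQ's state change to a single CPU.  Non-INT outputs
+ * (critical interrupt, machine check, ...) are refcounted and raised or
+ * lowered directly; INT delivery goes through the raised queue and is
+ * gated by the CPU's CTPR and the priority of any interrupt in service.
+ */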
+static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
+ bool active, bool was_active)
+{
+ IRQDest *dst;
+ IRQSource *src;
+ int priority;
+
+ dst = &opp->dst[n_CPU];
+ src = &opp->src[n_IRQ];
+
+ DPRINTF("%s: IRQ %d active %d was %d",
+ __func__, n_IRQ, active, was_active);
+
+ if (src->output != OPENPIC_OUTPUT_INT) {
+ DPRINTF("%s: output %d irq %d active %d was %d count %d",
+ __func__, src->output, n_IRQ, active, was_active,
+ dst->outputs_active[src->output]);
+
+ /*
+ * On Freescale MPIC, critical interrupts ignore priority,
+ * IACK, EOI, etc. Before MPIC v4.1 they also ignore
+ * masking.
+ */
+ if (active) {
+ if (!was_active && dst->outputs_active[src->output]++ == 0) {
+ DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
+ __func__, src->output, n_CPU, n_IRQ);
+ qemu_irq_raise(dst->irqs[src->output]);
+ }
+ } else {
+ if (was_active && --dst->outputs_active[src->output] == 0) {
+ DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
+ __func__, src->output, n_CPU, n_IRQ);
+ qemu_irq_lower(dst->irqs[src->output]);
+ }
+ }
+
+ return;
+ }
+
+ priority = IVPR_PRIORITY(src->ivpr);
+
+ /*
+ * Even if the interrupt doesn't have enough priority,
+ * it is still raised, in case ctpr is lowered later.
+ */
+ if (active) {
+ IRQ_setbit(&dst->raised, n_IRQ);
+ } else {
+ IRQ_resetbit(&dst->raised, n_IRQ);
+ }
+
+ IRQ_check(opp, &dst->raised);
+
+ if (active && priority <= dst->ctpr) {
+ DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
+ __func__, n_IRQ, priority, dst->ctpr, n_CPU);
+ active = 0;
+ }
+
+ if (active) {
+ if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
+ priority <= dst->servicing.priority) {
+ DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
+ __func__, n_IRQ, dst->servicing.next, n_CPU);
+ } else {
+ DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
+ __func__, n_CPU, n_IRQ, dst->raised.next);
+ qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
+ }
+ } else {
+ IRQ_get_next(opp, &dst->servicing);
+ if (dst->raised.priority > dst->ctpr &&
+ dst->raised.priority > dst->servicing.priority) {
+ DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
+ __func__, n_IRQ, dst->raised.next, dst->raised.priority,
+ dst->ctpr, dst->servicing.priority, n_CPU);
+ /* IRQ line stays asserted */
+ } else {
+ DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
+ __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
+ qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
+ }
+ }
+}
+
+/* update pic state because registers for n_IRQ have changed value */
+static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
+{
+ IRQSource *src;
+ bool active, was_active;
+ int i;
+
+ src = &opp->src[n_IRQ];
+ active = src->pending;
+
+ if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
+ /* Interrupt source is disabled */
+ DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);
+ active = false;
+ }
+
+ was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);
+
+ /*
+ * We don't have a similar check for already-active because
+ * ctpr may have changed and we need to withdraw the interrupt.
+ */
+ if (!active && !was_active) {
+ DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);
+ return;
+ }
+
+ if (active) {
+ src->ivpr |= IVPR_ACTIVITY_MASK;
+ } else {
+ src->ivpr &= ~IVPR_ACTIVITY_MASK;
+ }
+
+ if (src->destmask == 0) {
+ /* No target */
+ DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);
+ return;
+ }
+
+ if (src->destmask == (1 << src->last_cpu)) {
+ /* Only one CPU is allowed to receive this IRQ */
+ IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
+ } else if (!(src->ivpr & IVPR_MODE_MASK)) {
+ /* Directed delivery mode */
+ for (i = 0; i < opp->nb_cpus; i++) {
+ if (src->destmask & (1 << i)) {
+ IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
+ }
+ }
+ } else {
+ /* Distributed delivery mode */
+ for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
+ if (i == opp->nb_cpus) {
+ i = 0;
+ }
+ if (src->destmask & (1 << i)) {
+ IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
+ src->last_cpu = i;
+ break;
+ }
+ }
+ }
+}
+
+static void openpic_set_irq(void *opaque, int n_IRQ, int level)
+{
+ OpenPICState *opp = opaque;
+ IRQSource *src;
+
+ if (n_IRQ >= OPENPIC_MAX_IRQ) {
+ error_report("%s: IRQ %d out of range", __func__, n_IRQ);
+ abort();
+ }
+
+ src = &opp->src[n_IRQ];
+ DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
+ n_IRQ, level, src->ivpr);
+ if (src->level) {
+ /* level-sensitive irq */
+ src->pending = level;
+ openpic_update_irq(opp, n_IRQ);
+ } else {
+ /* edge-sensitive irq */
+ if (level) {
+ src->pending = 1;
+ openpic_update_irq(opp, n_IRQ);
+ }
+
+ if (src->output != OPENPIC_OUTPUT_INT) {
+ /*
+ * Edge-triggered interrupts shouldn't be used
+ * with non-INT delivery, but just in case,
+ * try to make it do something sane rather than
+ * cause an interrupt storm. This is close to
+ * what you'd probably see happen in real hardware.
+ */
+ src->pending = 0;
+ openpic_update_irq(opp, n_IRQ);
+ }
+ }
+}
+
+static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ)
+{
+ return opp->src[n_IRQ].idr;
+}
+
+static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ)
+{
+ if (opp->flags & OPENPIC_FLAG_ILR) {
+ return output_to_inttgt(opp->src[n_IRQ].output);
+ }
+
+ return 0xffffffff;
+}
+
+static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ)
+{
+ return opp->src[n_IRQ].ivpr;
+}
+
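+/*
+ * Write the Interrupt Destination Register.  On FSL MPICs with critical
+ * interrupt support, the per-CPU critical bits take precedence over the
+ * normal (INT) destination bits when both are set.
+ */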
+static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
+{
+ IRQSource *src = &opp->src[n_IRQ];
+ uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
+ uint32_t crit_mask = 0;
+ uint32_t mask = normal_mask;
+ int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
+ int i;
+
+ if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
+ crit_mask = mask << crit_shift;
+ mask |= crit_mask | IDR_EP;
+ }
+
+ src->idr = val & mask;
+ DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);
+
+ if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
+ if (src->idr & crit_mask) {
+ if (src->idr & normal_mask) {
+ DPRINTF("%s: IRQ configured for multiple output types, using "
+ "critical", __func__);
+ }
+
+ src->output = OPENPIC_OUTPUT_CINT;
+ src->nomask = true;
+ src->destmask = 0;
+
+ for (i = 0; i < opp->nb_cpus; i++) {
+ int n_ci = IDR_CI0_SHIFT - i;
+
+ if (src->idr & (1UL << n_ci)) {
+ src->destmask |= 1UL << i;
+ }
+ }
+ } else {
+ src->output = OPENPIC_OUTPUT_INT;
+ src->nomask = false;
+ src->destmask = src->idr & normal_mask;
+ }
+ } else {
+ src->destmask = src->idr;
+ }
+}
+
+static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
+{
+ if (opp->flags & OPENPIC_FLAG_ILR) {
+ IRQSource *src = &opp->src[n_IRQ];
+
+ src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
+ DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, src->idr,
+ src->output);
+
+ /* TODO: on MPIC v4.0 only, set nomask for non-INT */
+ }
+}
+
+static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
+{
+ uint32_t mask;
+
+ /*
+ * NOTE when implementing newer FSL MPIC models: starting with v4.0,
+ * the polarity bit is read-only on internal interrupts.
+ */
+ mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
+ IVPR_POLARITY_MASK | opp->vector_mask;
+
+ /* ACTIVITY bit is read-only */
+ opp->src[n_IRQ].ivpr =
+ (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);
+
+ /*
+     * For FSL internal interrupts, the sense bit is reserved and zero,
+ * and the interrupt is always level-triggered. Timers and IPIs
+ * have no sense or polarity bits, and are edge-triggered.
+ */
+ switch (opp->src[n_IRQ].type) {
+ case IRQ_TYPE_NORMAL:
+ opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
+ break;
+
+ case IRQ_TYPE_FSLINT:
+ opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
+ break;
+
+ case IRQ_TYPE_FSLSPECIAL:
+ opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
+ break;
+ }
+
+ openpic_update_irq(opp, n_IRQ);
+ DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
+ opp->src[n_IRQ].ivpr);
+}
+
+static void openpic_gcr_write(OpenPICState *opp, uint64_t val)
+{
+ bool mpic_proxy = false;
+
+ if (val & GCR_RESET) {
+ openpic_reset(DEVICE(opp));
+ return;
+ }
+
+ opp->gcr &= ~opp->mpic_mode_mask;
+ opp->gcr |= val & opp->mpic_mode_mask;
+
+ /* Set external proxy mode */
+ if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) {
+ mpic_proxy = true;
+ }
+
+ ppce500_set_mpic_proxy(mpic_proxy);
+}
+
+static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned len)
+{
+ OpenPICState *opp = opaque;
+ IRQDest *dst;
+ int idx;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
+ __func__, addr, val);
+ if (addr & 0xF) {
+ return;
+ }
+ switch (addr) {
+ case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
+ break;
+ case 0x40:
+ case 0x50:
+ case 0x60:
+ case 0x70:
+ case 0x80:
+ case 0x90:
+ case 0xA0:
+ case 0xB0:
+ openpic_cpu_write_internal(opp, addr, val, get_current_cpu());
+ break;
+ case 0x1000: /* FRR */
+ break;
+ case 0x1020: /* GCR */
+ openpic_gcr_write(opp, val);
+ break;
+ case 0x1080: /* VIR */
+ break;
+ case 0x1090: /* PIR */
+ for (idx = 0; idx < opp->nb_cpus; idx++) {
+ if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
+ DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
+ dst = &opp->dst[idx];
+ qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
+ } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
+ DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
+ dst = &opp->dst[idx];
+ qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
+ }
+ }
+ opp->pir = val;
+ break;
+ case 0x10A0: /* IPI_IVPR */
+ case 0x10B0:
+ case 0x10C0:
+ case 0x10D0:
+ {
+ int idx;
+ idx = (addr - 0x10A0) >> 4;
+ write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
+ }
+ break;
+ case 0x10E0: /* SPVE */
+ opp->spve = val & opp->vector_mask;
+ break;
+ default:
+ break;
+ }
+}
+
+static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
+{
+ OpenPICState *opp = opaque;
+ uint32_t retval;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
+ retval = 0xFFFFFFFF;
+ if (addr & 0xF) {
+ return retval;
+ }
+ switch (addr) {
+ case 0x1000: /* FRR */
+ retval = opp->frr;
+ break;
+ case 0x1020: /* GCR */
+ retval = opp->gcr;
+ break;
+ case 0x1080: /* VIR */
+ retval = opp->vir;
+ break;
+ case 0x1090: /* PIR */
+ retval = 0x00000000;
+ break;
+ case 0x00: /* Block Revision Register1 (BRR1) */
+ retval = opp->brr1;
+ break;
+ case 0x40:
+ case 0x50:
+ case 0x60:
+ case 0x70:
+ case 0x80:
+ case 0x90:
+ case 0xA0:
+ case 0xB0:
+ retval = openpic_cpu_read_internal(opp, addr, get_current_cpu());
+ break;
+ case 0x10A0: /* IPI_IVPR */
+ case 0x10B0:
+ case 0x10C0:
+ case 0x10D0:
+ {
+ int idx;
+ idx = (addr - 0x10A0) >> 4;
+ retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
+ }
+ break;
+ case 0x10E0: /* SPVE */
+ retval = opp->spve;
+ break;
+ default:
+ break;
+ }
+ DPRINTF("%s: => 0x%08x", __func__, retval);
+
+ return retval;
+}
+
+static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);
+
+static void qemu_timer_cb(void *opaque)
+{
+ OpenPICTimer *tmr = opaque;
+ OpenPICState *opp = tmr->opp;
+ uint32_t n_IRQ = tmr->n_IRQ;
+ uint32_t val = tmr->tbcr & ~TBCR_CI;
+ uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG); /* invert toggle. */
+
+ DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
+ /* Reload current count from base count and setup timer. */
+ tmr->tccr = val | tog;
+ openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
+ /* Raise the interrupt. */
+ opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
+ openpic_set_irq(opp, n_IRQ, 1);
+ openpic_set_irq(opp, n_IRQ, 0);
+}
+
+/*
+ * If enabled is true, arranges for an interrupt to be raised val clocks into
+ * the future; if enabled is false, cancels the timer.
+ */
+static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
+{
+ uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);
+ /*
+ * A count of zero causes a timer to be set to expire immediately. This
+ * effectively stops the simulation since the timer is constantly expiring
+ * which prevents guest code execution, so we don't honor that
+ * configuration. On real hardware, this situation would generate an
+ * interrupt on every clock cycle if the interrupt was unmasked.
+ */
+ if ((ns == 0) || !enabled) {
+ tmr->qemu_timer_active = false;
+ tmr->tccr = tmr->tccr & TCCR_TOG;
+ timer_del(tmr->qemu_timer); /* set timer to never expire. */
+ } else {
+ tmr->qemu_timer_active = true;
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ tmr->origin_time = now;
+ timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */
+ }
+}
+
+/*
+ * Returns the current tccr value, i.e., timer value (in clocks) with
+ * appropriate TOG.
+ */
+static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
+{
+ uint64_t retval;
+ if (!tmr->qemu_timer_active) {
+ retval = tmr->tccr;
+ } else {
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint64_t used = now - tmr->origin_time; /* nsecs */
+ uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
+ uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
+ retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
+ }
+ return retval;
+}
+
+static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned len)
+{
+ OpenPICState *opp = opaque;
+ int idx;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
+ __func__, (addr + 0x10f0), val);
+ if (addr & 0xF) {
+ return;
+ }
+
+ if (addr == 0) {
+ /* TFRR */
+ opp->tfrr = val;
+ return;
+ }
+ addr -= 0x10; /* correct for TFRR */
+ idx = (addr >> 6) & 0x3;
+
+ switch (addr & 0x30) {
+ case 0x00: /* TCCR */
+ break;
+ case 0x10: /* TBCR */
+ /* Did the enable status change? */
+ if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
+ /* Did "Count Inhibit" transition from 1 to 0? */
+ if ((val & TBCR_CI) == 0) {
+ opp->timers[idx].tccr = val & ~TCCR_TOG;
+ }
+ openpic_tmr_set_tmr(&opp->timers[idx],
+ (val & ~TBCR_CI),
+ /*enabled=*/((val & TBCR_CI) == 0));
+ }
+ opp->timers[idx].tbcr = val;
+ break;
+ case 0x20: /* TVPR */
+ write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
+ break;
+ case 0x30: /* TDR */
+ write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
+ break;
+ }
+}
+
+static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
+{
+ OpenPICState *opp = opaque;
+ uint32_t retval = -1;
+ int idx;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);
+ if (addr & 0xF) {
+ goto out;
+ }
+ if (addr == 0) {
+ /* TFRR */
+ retval = opp->tfrr;
+ goto out;
+ }
+ addr -= 0x10; /* correct for TFRR */
+ idx = (addr >> 6) & 0x3;
+ switch (addr & 0x30) {
+ case 0x00: /* TCCR */
+ retval = openpic_tmr_get_timer(&opp->timers[idx]);
+ break;
+ case 0x10: /* TBCR */
+ retval = opp->timers[idx].tbcr;
+ break;
+ case 0x20: /* TVPR */
+ retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
+ break;
+ case 0x30: /* TDR */
+ retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
+ break;
+ }
+
+out:
+ DPRINTF("%s: => 0x%08x", __func__, retval);
+
+ return retval;
+}
+
+static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned len)
+{
+ OpenPICState *opp = opaque;
+ int idx;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
+ __func__, addr, val);
+
+ addr = addr & 0xffff;
+ idx = addr >> 5;
+
+ switch (addr & 0x1f) {
+ case 0x00:
+ write_IRQreg_ivpr(opp, idx, val);
+ break;
+ case 0x10:
+ write_IRQreg_idr(opp, idx, val);
+ break;
+ case 0x18:
+ write_IRQreg_ilr(opp, idx, val);
+ break;
+ }
+}
+
+static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
+{
+ OpenPICState *opp = opaque;
+ uint32_t retval;
+ int idx;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
+ retval = 0xFFFFFFFF;
+
+ addr = addr & 0xffff;
+ idx = addr >> 5;
+
+ switch (addr & 0x1f) {
+ case 0x00:
+ retval = read_IRQreg_ivpr(opp, idx);
+ break;
+ case 0x10:
+ retval = read_IRQreg_idr(opp, idx);
+ break;
+ case 0x18:
+ retval = read_IRQreg_ilr(opp, idx);
+ break;
+ }
+
+ DPRINTF("%s: => 0x%08x", __func__, retval);
+ return retval;
+}
+
+static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ OpenPICState *opp = opaque;
+ int idx = opp->irq_msi;
+ int srs, ibs;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
+ __func__, addr, val);
+ if (addr & 0xF) {
+ return;
+ }
+
+ switch (addr) {
+ case MSIIR_OFFSET:
+ srs = val >> MSIIR_SRS_SHIFT;
+ idx += srs;
+ ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
+ opp->msi[srs].msir |= 1 << ibs;
+ openpic_set_irq(opp, idx, 1);
+ break;
+ default:
+ /* most registers are read-only, thus ignored */
+ break;
+ }
+}
+
+static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
+{
+ OpenPICState *opp = opaque;
+ uint64_t r = 0;
+ int i, srs;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
+ if (addr & 0xF) {
+ return -1;
+ }
+
+ srs = addr >> 4;
+
+ switch (addr) {
+ case 0x00:
+ case 0x10:
+ case 0x20:
+ case 0x30:
+ case 0x40:
+ case 0x50:
+ case 0x60:
+ case 0x70: /* MSIRs */
+ r = opp->msi[srs].msir;
+ /* Clear on read */
+ opp->msi[srs].msir = 0;
+ openpic_set_irq(opp, opp->irq_msi + srs, 0);
+ break;
+ case 0x120: /* MSISR */
+ for (i = 0; i < MAX_MSI; i++) {
+ r |= (opp->msi[i].msir ? 1 : 0) << i;
+ }
+ break;
+ }
+
+ return r;
+}
+
+static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t r = 0;
+
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
+
+ /* TODO: EISR/EIMR */
+
+ return r;
+}
+
+static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
+ __func__, addr, val);
+
+ /* TODO: EISR/EIMR */
+}
+
+static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
+ uint32_t val, int idx)
+{
+ OpenPICState *opp = opaque;
+ IRQSource *src;
+ IRQDest *dst;
+ int s_IRQ, n_IRQ;
+
+ DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,
+ addr, val);
+
+ if (idx < 0 || idx >= opp->nb_cpus) {
+ return;
+ }
+
+ if (addr & 0xF) {
+ return;
+ }
+ dst = &opp->dst[idx];
+ addr &= 0xFF0;
+ switch (addr) {
+ case 0x40: /* IPIDR */
+ case 0x50:
+ case 0x60:
+ case 0x70:
+ idx = (addr - 0x40) >> 4;
+        /* we use IDE as a mask of CPUs the IPI still needs delivering to */
+ opp->src[opp->irq_ipi0 + idx].destmask |= val;
+ openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
+ openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
+ break;
+ case 0x80: /* CTPR */
+ dst->ctpr = val & 0x0000000F;
+
+ DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
+ __func__, idx, dst->ctpr, dst->raised.priority,
+ dst->servicing.priority);
+
+ if (dst->raised.priority <= dst->ctpr) {
+ DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
+ __func__, idx);
+ qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
+ } else if (dst->raised.priority > dst->servicing.priority) {
+ DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
+ __func__, idx, dst->raised.next);
+ qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
+ }
+
+ break;
+ case 0x90: /* WHOAMI */
+ /* Read-only register */
+ break;
+ case 0xA0: /* IACK */
+ /* Read-only register */
+ break;
+ case 0xB0: /* EOI */
+ DPRINTF("EOI");
+ s_IRQ = IRQ_get_next(opp, &dst->servicing);
+
+ if (s_IRQ < 0) {
+ DPRINTF("%s: EOI with no interrupt in service", __func__);
+ break;
+ }
+
+ IRQ_resetbit(&dst->servicing, s_IRQ);
+ /* Set up next servicing IRQ */
+ s_IRQ = IRQ_get_next(opp, &dst->servicing);
+ /* Check queued interrupts. */
+ n_IRQ = IRQ_get_next(opp, &dst->raised);
+ src = &opp->src[n_IRQ];
+ if (n_IRQ != -1 &&
+ (s_IRQ == -1 ||
+ IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
+ DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
+ idx, n_IRQ);
+ qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned len)
+{
+ openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12);
+}
+
+
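+/*
+ * Interrupt acknowledge: lower the INT output, move the highest-priority
+ * raised IRQ into the servicing queue and return its vector, or return
+ * the spurious vector (SPVE) when nothing suitable is pending.
+ */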
+static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
+{
+ IRQSource *src;
+ int retval, irq;
+
+ DPRINTF("Lower OpenPIC INT output");
+ qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
+
+ irq = IRQ_get_next(opp, &dst->raised);
+ DPRINTF("IACK: irq=%d", irq);
+
+ if (irq == -1) {
+ /* No more interrupt pending */
+ return opp->spve;
+ }
+
+ src = &opp->src[irq];
+ if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
+ !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
+ error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
+ __func__, irq, dst->ctpr, src->ivpr);
+ openpic_update_irq(opp, irq);
+ retval = opp->spve;
+ } else {
+ /* IRQ enter servicing state */
+ IRQ_setbit(&dst->servicing, irq);
+ retval = IVPR_VECTOR(opp, src->ivpr);
+ }
+
+ if (!src->level) {
+ /* edge-sensitive IRQ */
+ src->ivpr &= ~IVPR_ACTIVITY_MASK;
+ src->pending = 0;
+ IRQ_resetbit(&dst->raised, irq);
+ }
+
+ /* Timers and IPIs support multicast. */
+ if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
+ ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
+ DPRINTF("irq is IPI or TMR");
+ src->destmask &= ~(1 << cpu);
+ if (src->destmask && !src->level) {
+ /* trigger on CPUs that didn't know about it yet */
+ openpic_set_irq(opp, irq, 1);
+ openpic_set_irq(opp, irq, 0);
+ /* if all CPUs knew about it, set active bit again */
+ src->ivpr |= IVPR_ACTIVITY_MASK;
+ }
+ }
+
+ return retval;
+}
+
+static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
+ int idx)
+{
+ OpenPICState *opp = opaque;
+ IRQDest *dst;
+ uint32_t retval;
+
+ DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
+ retval = 0xFFFFFFFF;
+
+ if (idx < 0 || idx >= opp->nb_cpus) {
+ return retval;
+ }
+
+ if (addr & 0xF) {
+ return retval;
+ }
+ dst = &opp->dst[idx];
+ addr &= 0xFF0;
+ switch (addr) {
+ case 0x80: /* CTPR */
+ retval = dst->ctpr;
+ break;
+ case 0x90: /* WHOAMI */
+ retval = idx;
+ break;
+ case 0xA0: /* IACK */
+ retval = openpic_iack(opp, dst, idx);
+ break;
+ case 0xB0: /* EOI */
+ retval = 0;
+ break;
+ default:
+ break;
+ }
+ DPRINTF("%s: => 0x%08x", __func__, retval);
+
+ return retval;
+}
+
+static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len)
+{
+ return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12);
+}
+
+static const MemoryRegionOps openpic_glb_ops_le = {
+ .write = openpic_gbl_write,
+ .read = openpic_gbl_read,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_glb_ops_be = {
+ .write = openpic_gbl_write,
+ .read = openpic_gbl_read,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_tmr_ops_le = {
+ .write = openpic_tmr_write,
+ .read = openpic_tmr_read,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_tmr_ops_be = {
+ .write = openpic_tmr_write,
+ .read = openpic_tmr_read,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_cpu_ops_le = {
+ .write = openpic_cpu_write,
+ .read = openpic_cpu_read,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_cpu_ops_be = {
+ .write = openpic_cpu_write,
+ .read = openpic_cpu_read,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_src_ops_le = {
+ .write = openpic_src_write,
+ .read = openpic_src_read,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_src_ops_be = {
+ .write = openpic_src_write,
+ .read = openpic_src_read,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_msi_ops_be = {
+ .read = openpic_msi_read,
+ .write = openpic_msi_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps openpic_summary_ops_be = {
+ .read = openpic_summary_read,
+ .write = openpic_summary_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void openpic_reset(DeviceState *d)
+{
+ OpenPICState *opp = OPENPIC(d);
+ int i;
+
+ opp->gcr = GCR_RESET;
+ /* Initialise controller registers */
+ opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
+ ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
+ (opp->vid << FRR_VID_SHIFT);
+
+ opp->pir = 0;
+ opp->spve = -1 & opp->vector_mask;
+ opp->tfrr = opp->tfrr_reset;
+ /* Initialise IRQ sources */
+ for (i = 0; i < opp->max_irq; i++) {
+ opp->src[i].ivpr = opp->ivpr_reset;
+ switch (opp->src[i].type) {
+ case IRQ_TYPE_NORMAL:
+ opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK);
+ break;
+
+ case IRQ_TYPE_FSLINT:
+ opp->src[i].ivpr |= IVPR_POLARITY_MASK;
+ break;
+
+ case IRQ_TYPE_FSLSPECIAL:
+ break;
+ }
+
+ /* Mask all IPI interrupts for Freescale OpenPIC */
+ if ((opp->model == OPENPIC_MODEL_FSL_MPIC_20) ||
+ (opp->model == OPENPIC_MODEL_FSL_MPIC_42)) {
+ if (i >= opp->irq_ipi0 && i < opp->irq_tim0) {
+ write_IRQreg_idr(opp, i, 0);
+ continue;
+ }
+ }
+
+ write_IRQreg_idr(opp, i, opp->idr_reset);
+ }
+ /* Initialise IRQ destinations */
+ for (i = 0; i < opp->nb_cpus; i++) {
+ opp->dst[i].ctpr = 15;
+ opp->dst[i].raised.next = -1;
+ opp->dst[i].raised.priority = 0;
+ bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS);
+ opp->dst[i].servicing.next = -1;
+ opp->dst[i].servicing.priority = 0;
+ bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS);
+ }
+ /* Initialise timers */
+ for (i = 0; i < OPENPIC_MAX_TMR; i++) {
+ opp->timers[i].tccr = 0;
+ opp->timers[i].tbcr = TBCR_CI;
+ if (opp->timers[i].qemu_timer_active) {
+ timer_del(opp->timers[i].qemu_timer); /* Inhibit timer */
+ opp->timers[i].qemu_timer_active = false;
+ }
+ }
+ /* Go out of RESET state */
+ opp->gcr = 0;
+}
+
+typedef struct MemReg {
+ const char *name;
+ MemoryRegionOps const *ops;
+ hwaddr start_addr;
+ ram_addr_t size;
+} MemReg;
+
+static void fsl_common_init(OpenPICState *opp)
+{
+ int i;
+ int virq = OPENPIC_MAX_SRC;
+
+ opp->vid = VID_REVISION_1_2;
+ opp->vir = VIR_GENERIC;
+ opp->vector_mask = 0xFFFF;
+ opp->tfrr_reset = 0;
+ opp->ivpr_reset = IVPR_MASK_MASK;
+ opp->idr_reset = 1 << 0;
+ opp->max_irq = OPENPIC_MAX_IRQ;
+
+ opp->irq_ipi0 = virq;
+ virq += OPENPIC_MAX_IPI;
+ opp->irq_tim0 = virq;
+ virq += OPENPIC_MAX_TMR;
+
+ assert(virq <= OPENPIC_MAX_IRQ);
+
+ opp->irq_msi = 224;
+
+ msi_nonbroken = true;
+ for (i = 0; i < opp->fsl->max_ext; i++) {
+ opp->src[i].level = false;
+ }
+
+ /* Internal interrupts, including message and MSI */
+ for (i = 16; i < OPENPIC_MAX_SRC; i++) {
+ opp->src[i].type = IRQ_TYPE_FSLINT;
+ opp->src[i].level = true;
+ }
+
+ /* timers and IPIs */
+ for (i = OPENPIC_MAX_SRC; i < virq; i++) {
+ opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
+ opp->src[i].level = false;
+ }
+
+ for (i = 0; i < OPENPIC_MAX_TMR; i++) {
+ opp->timers[i].n_IRQ = opp->irq_tim0 + i;
+ opp->timers[i].qemu_timer_active = false;
+ opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ &qemu_timer_cb,
+ &opp->timers[i]);
+ opp->timers[i].opp = opp;
+ }
+}
+
+static void map_list(OpenPICState *opp, const MemReg *list, int *count)
+{
+ while (list->name) {
+ assert(*count < ARRAY_SIZE(opp->sub_io_mem));
+
+ memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops,
+ opp, list->name, list->size);
+
+ memory_region_add_subregion(&opp->mem, list->start_addr,
+ &opp->sub_io_mem[*count]);
+
+ (*count)++;
+ list++;
+ }
+}
+
+static const VMStateDescription vmstate_openpic_irq_queue = {
+ .name = "openpic_irq_queue",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size),
+ VMSTATE_INT32(next, IRQQueue),
+ VMSTATE_INT32(priority, IRQQueue),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_openpic_irqdest = {
+ .name = "openpic_irqdest",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(ctpr, IRQDest),
+ VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue,
+ IRQQueue),
+ VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue,
+ IRQQueue),
+ VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_openpic_irqsource = {
+ .name = "openpic_irqsource",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(ivpr, IRQSource),
+ VMSTATE_UINT32(idr, IRQSource),
+ VMSTATE_UINT32(destmask, IRQSource),
+ VMSTATE_INT32(last_cpu, IRQSource),
+ VMSTATE_INT32(pending, IRQSource),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_openpic_timer = {
+ .name = "openpic_timer",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(tccr, OpenPICTimer),
+ VMSTATE_UINT32(tbcr, OpenPICTimer),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_openpic_msi = {
+ .name = "openpic_msi",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(msir, OpenPICMSI),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int openpic_post_load(void *opaque, int version_id)
+{
+ OpenPICState *opp = (OpenPICState *)opaque;
+ int i;
+
+ /* Update internal ivpr and idr variables */
+ for (i = 0; i < opp->max_irq; i++) {
+ write_IRQreg_idr(opp, i, opp->src[i].idr);
+ write_IRQreg_ivpr(opp, i, opp->src[i].ivpr);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_openpic = {
+ .name = "openpic",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .post_load = openpic_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(gcr, OpenPICState),
+ VMSTATE_UINT32(vir, OpenPICState),
+ VMSTATE_UINT32(pir, OpenPICState),
+ VMSTATE_UINT32(spve, OpenPICState),
+ VMSTATE_UINT32(tfrr, OpenPICState),
+ VMSTATE_UINT32(max_irq, OpenPICState),
+ VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0,
+ vmstate_openpic_irqsource, IRQSource),
+ VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL),
+ VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0,
+ vmstate_openpic_irqdest, IRQDest),
+ VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0,
+ vmstate_openpic_timer, OpenPICTimer),
+ VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0,
+ vmstate_openpic_msi, OpenPICMSI),
+ VMSTATE_UINT32(irq_ipi0, OpenPICState),
+ VMSTATE_UINT32(irq_tim0, OpenPICState),
+ VMSTATE_UINT32(irq_msi, OpenPICState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void openpic_init(Object *obj)
+{
+ OpenPICState *opp = OPENPIC(obj);
+
+ memory_region_init(&opp->mem, obj, "openpic", 0x40000);
+}
+
+static void openpic_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(dev);
+ OpenPICState *opp = OPENPIC(dev);
+ int i, j;
+ int list_count = 0;
+ static const MemReg list_le[] = {
+ {"glb", &openpic_glb_ops_le,
+ OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
+ {"tmr", &openpic_tmr_ops_le,
+ OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
+ {"src", &openpic_src_ops_le,
+ OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
+ {"cpu", &openpic_cpu_ops_le,
+ OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
+ {NULL}
+ };
+ static const MemReg list_be[] = {
+ {"glb", &openpic_glb_ops_be,
+ OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
+ {"tmr", &openpic_tmr_ops_be,
+ OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
+ {"src", &openpic_src_ops_be,
+ OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
+ {"cpu", &openpic_cpu_ops_be,
+ OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
+ {NULL}
+ };
+ static const MemReg list_fsl[] = {
+ {"msi", &openpic_msi_ops_be,
+ OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
+ {"summary", &openpic_summary_ops_be,
+ OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
+ {NULL}
+ };
+
+ if (opp->nb_cpus > MAX_CPU) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
+ TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
+ (uint64_t)0, (uint64_t)MAX_CPU);
+ return;
+ }
+
+ switch (opp->model) {
+ case OPENPIC_MODEL_FSL_MPIC_20:
+ default:
+ opp->fsl = &fsl_mpic_20;
+ opp->brr1 = 0x00400200;
+ opp->flags |= OPENPIC_FLAG_IDR_CRIT;
+ opp->nb_irqs = 80;
+ opp->mpic_mode_mask = GCR_MODE_MIXED;
+
+ fsl_common_init(opp);
+ map_list(opp, list_be, &list_count);
+ map_list(opp, list_fsl, &list_count);
+
+ break;
+
+ case OPENPIC_MODEL_FSL_MPIC_42:
+ opp->fsl = &fsl_mpic_42;
+ opp->brr1 = 0x00400402;
+ opp->flags |= OPENPIC_FLAG_ILR;
+ opp->nb_irqs = 196;
+ opp->mpic_mode_mask = GCR_MODE_PROXY;
+
+ fsl_common_init(opp);
+ map_list(opp, list_be, &list_count);
+ map_list(opp, list_fsl, &list_count);
+
+ break;
+
+ case OPENPIC_MODEL_KEYLARGO:
+ opp->nb_irqs = KEYLARGO_MAX_EXT;
+ opp->vid = VID_REVISION_1_2;
+ opp->vir = VIR_GENERIC;
+ opp->vector_mask = 0xFF;
+ opp->tfrr_reset = 4160000;
+ opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
+ opp->idr_reset = 0;
+ opp->max_irq = KEYLARGO_MAX_IRQ;
+ opp->irq_ipi0 = KEYLARGO_IPI_IRQ;
+ opp->irq_tim0 = KEYLARGO_TMR_IRQ;
+ opp->brr1 = -1;
+ opp->mpic_mode_mask = GCR_MODE_MIXED;
+
+ if (opp->nb_cpus != 1) {
+ error_setg(errp, "Only UP supported today");
+ return;
+ }
+
+ map_list(opp, list_le, &list_count);
+ break;
+ }
+
+ for (i = 0; i < opp->nb_cpus; i++) {
+ opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
+ for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
+ sysbus_init_irq(d, &opp->dst[i].irqs[j]);
+ }
+
+ opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
+ opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
+ opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
+ opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
+ }
+
+ sysbus_init_mmio(d, &opp->mem);
+ qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
+}
+
+static Property openpic_properties[] = {
+ DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20),
+ DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void openpic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = openpic_realize;
+ device_class_set_props(dc, openpic_properties);
+ dc->reset = openpic_reset;
+ dc->vmsd = &vmstate_openpic;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo openpic_info = {
+ .name = TYPE_OPENPIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(OpenPICState),
+ .instance_init = openpic_init,
+ .class_init = openpic_class_init,
+};
+
+static void openpic_register_types(void)
+{
+ type_register_static(&openpic_info);
+}
+
+type_init(openpic_register_types)
diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c
new file mode 100644
index 000000000..557dd0c2b
--- /dev/null
+++ b/hw/intc/openpic_kvm.c
@@ -0,0 +1,294 @@
+/*
+ * KVM in-kernel OpenPIC
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include <sys/ioctl.h>
+#include "hw/ppc/openpic.h"
+#include "hw/ppc/openpic_kvm.h"
+#include "hw/pci/msi.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "sysemu/kvm.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+
+#define GCR_RESET 0x80000000
+
+OBJECT_DECLARE_SIMPLE_TYPE(KVMOpenPICState, KVM_OPENPIC)
+
+struct KVMOpenPICState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion mem;
+ MemoryListener mem_listener;
+ uint32_t fd;
+ uint32_t model;
+ hwaddr mapped;
+};
+
+static void kvm_openpic_set_irq(void *opaque, int n_IRQ, int level)
+{
+ kvm_set_irq(kvm_state, n_IRQ, level);
+}
+
+static void kvm_openpic_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ KVMOpenPICState *opp = opaque;
+ struct kvm_device_attr attr;
+ uint32_t val32 = val;
+ int ret;
+
+ attr.group = KVM_DEV_MPIC_GRP_REGISTER;
+ attr.attr = addr;
+ attr.addr = (uint64_t)(unsigned long)&val32;
+
+ ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr);
+ if (ret < 0) {
+ qemu_log_mask(LOG_UNIMP, "%s: %s %" PRIx64 "\n", __func__,
+ strerror(errno), attr.attr);
+ }
+}
+
+static void kvm_openpic_reset(DeviceState *d)
+{
+ KVMOpenPICState *opp = KVM_OPENPIC(d);
+
+ /* Trigger the GCR.RESET bit to reset the PIC */
+ kvm_openpic_write(opp, 0x1020, GCR_RESET, sizeof(uint32_t));
+}
+
+static uint64_t kvm_openpic_read(void *opaque, hwaddr addr, unsigned size)
+{
+ KVMOpenPICState *opp = opaque;
+ struct kvm_device_attr attr;
+ uint32_t val = 0xdeadbeef;
+ int ret;
+
+ attr.group = KVM_DEV_MPIC_GRP_REGISTER;
+ attr.attr = addr;
+ attr.addr = (uint64_t)(unsigned long)&val;
+
+ ret = ioctl(opp->fd, KVM_GET_DEVICE_ATTR, &attr);
+ if (ret < 0) {
+ qemu_log_mask(LOG_UNIMP, "%s: %s %" PRIx64 "\n", __func__,
+ strerror(errno), attr.attr);
+ return 0;
+ }
+
+ return val;
+}
+
+static const MemoryRegionOps kvm_openpic_mem_ops = {
+ .write = kvm_openpic_write,
+ .read = kvm_openpic_read,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void kvm_openpic_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ KVMOpenPICState *opp = container_of(listener, KVMOpenPICState,
+ mem_listener);
+ struct kvm_device_attr attr;
+ uint64_t reg_base;
+ int ret;
+
+ /* Ignore events on regions that are not us */
+ if (section->mr != &opp->mem) {
+ return;
+ }
+
+ if (opp->mapped) {
+ /*
+ * We can only map the MPIC once. Since we are already mapped,
+ * the best we can do is ignore new maps.
+ */
+ return;
+ }
+
+ reg_base = section->offset_within_address_space;
+ opp->mapped = reg_base;
+
+ attr.group = KVM_DEV_MPIC_GRP_MISC;
+ attr.attr = KVM_DEV_MPIC_BASE_ADDR;
+ attr.addr = (uint64_t)(unsigned long)&reg_base;
+
+ ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr);
+ if (ret < 0) {
+ fprintf(stderr, "%s: %s %" PRIx64 "\n", __func__,
+ strerror(errno), reg_base);
+ }
+}
+
+static void kvm_openpic_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ KVMOpenPICState *opp = container_of(listener, KVMOpenPICState,
+ mem_listener);
+ struct kvm_device_attr attr;
+ uint64_t reg_base = 0;
+ int ret;
+
+ /* Ignore events on regions that are not us */
+ if (section->mr != &opp->mem) {
+ return;
+ }
+
+ if (section->offset_within_address_space != opp->mapped) {
+ /*
+ * We can only map the MPIC once. This mapping was a secondary
+ * one that we couldn't fulfill. Ignore it.
+ */
+ return;
+ }
+ opp->mapped = 0;
+
+ attr.group = KVM_DEV_MPIC_GRP_MISC;
+ attr.attr = KVM_DEV_MPIC_BASE_ADDR;
+ attr.addr = (uint64_t)(unsigned long)&reg_base;
+
+ ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr);
+ if (ret < 0) {
+ fprintf(stderr, "%s: %s %" PRIx64 "\n", __func__,
+ strerror(errno), reg_base);
+ }
+}
+
+static void kvm_openpic_init(Object *obj)
+{
+ KVMOpenPICState *opp = KVM_OPENPIC(obj);
+
+ memory_region_init_io(&opp->mem, OBJECT(opp), &kvm_openpic_mem_ops, opp,
+ "kvm-openpic", 0x40000);
+}
+
+static void kvm_openpic_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(dev);
+ KVMOpenPICState *opp = KVM_OPENPIC(dev);
+ KVMState *s = kvm_state;
+ int kvm_openpic_model;
+ struct kvm_create_device cd = {0};
+ int ret, i;
+
+ if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
+ error_setg(errp, "Kernel is lacking Device Control API");
+ return;
+ }
+
+ switch (opp->model) {
+ case OPENPIC_MODEL_FSL_MPIC_20:
+ kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_20;
+ break;
+
+ case OPENPIC_MODEL_FSL_MPIC_42:
+ kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_42;
+ break;
+
+ default:
+ error_setg(errp, "Unsupported OpenPIC model %" PRIu32, opp->model);
+ return;
+ }
+
+ cd.type = kvm_openpic_model;
+ ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &cd);
+ if (ret < 0) {
+ error_setg(errp, "Can't create device %d: %s",
+ cd.type, strerror(errno));
+ return;
+ }
+ opp->fd = cd.fd;
+
+ sysbus_init_mmio(d, &opp->mem);
+ qdev_init_gpio_in(dev, kvm_openpic_set_irq, OPENPIC_MAX_IRQ);
+
+ opp->mem_listener.region_add = kvm_openpic_region_add;
+ opp->mem_listener.region_del = kvm_openpic_region_del;
+ opp->mem_listener.name = "openpic-kvm";
+ memory_listener_register(&opp->mem_listener, &address_space_memory);
+
+ /* indicate pic capabilities */
+ msi_nonbroken = true;
+ kvm_kernel_irqchip = true;
+ kvm_async_interrupts_allowed = true;
+
+ /* set up irq routing */
+ kvm_init_irq_routing(kvm_state);
+ for (i = 0; i < 256; ++i) {
+ kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+ }
+
+ kvm_msi_via_irqfd_allowed = true;
+ kvm_gsi_routing_allowed = true;
+
+ kvm_irqchip_commit_routes(s);
+}
+
+int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs)
+{
+ KVMOpenPICState *opp = KVM_OPENPIC(d);
+
+ return kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_MPIC, 0, opp->fd,
+ kvm_arch_vcpu_id(cs));
+}
+
+static Property kvm_openpic_properties[] = {
+ DEFINE_PROP_UINT32("model", KVMOpenPICState, model,
+ OPENPIC_MODEL_FSL_MPIC_20),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void kvm_openpic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = kvm_openpic_realize;
+ device_class_set_props(dc, kvm_openpic_properties);
+ dc->reset = kvm_openpic_reset;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo kvm_openpic_info = {
+ .name = TYPE_KVM_OPENPIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(KVMOpenPICState),
+ .instance_init = kvm_openpic_init,
+ .class_init = kvm_openpic_class_init,
+};
+
+static void kvm_openpic_register_types(void)
+{
+ type_register_static(&kvm_openpic_info);
+}
+
+type_init(kvm_openpic_register_types)
diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c
new file mode 100644
index 000000000..cd8844360
--- /dev/null
+++ b/hw/intc/pl190.c
@@ -0,0 +1,297 @@
+/*
+ * Arm PrimeCell PL190 Vector Interrupt Controller
+ *
+ * Copyright (c) 2006 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+
+/* The number of virtual priority levels. 16 user vectors plus the
+ unvectored IRQ. Chained interrupts would require an additional level
+ if implemented. */
+
+#define PL190_NUM_PRIO 17
+
+#define TYPE_PL190 "pl190"
+OBJECT_DECLARE_SIMPLE_TYPE(PL190State, PL190)
+
+struct PL190State {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ uint32_t level;
+ uint32_t soft_level;
+ uint32_t irq_enable;
+ uint32_t fiq_select;
+ uint8_t vect_control[16];
+ uint32_t vect_addr[PL190_NUM_PRIO];
+ /* Mask containing interrupts with higher priority than this one. */
+ uint32_t prio_mask[PL190_NUM_PRIO + 1];
+ int protected;
+ /* Current priority level. */
+ int priority;
+ int prev_prio[PL190_NUM_PRIO];
+ qemu_irq irq;
+ qemu_irq fiq;
+};
+
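+/*
+ * Peripheral and PrimeCell ID register values for the 0xfe0-0xfff
+ * window; pl190_read returns one byte per 32-bit register.
+ */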
+static const unsigned char pl190_id[] =
+{ 0x90, 0x11, 0x04, 0x00, 0x0D, 0xf0, 0x05, 0xb1 };
+
+static inline uint32_t pl190_irq_level(PL190State *s)
+{
+ return (s->level | s->soft_level) & s->irq_enable & ~s->fiq_select;
+}
+
+/* Update interrupts. */
+static void pl190_update(PL190State *s)
+{
+ uint32_t level = pl190_irq_level(s);
+ int set;
+
+ set = (level & s->prio_mask[s->priority]) != 0;
+ qemu_set_irq(s->irq, set);
+ set = ((s->level | s->soft_level) & s->fiq_select) != 0;
+ qemu_set_irq(s->fiq, set);
+}
+
+static void pl190_set_irq(void *opaque, int irq, int level)
+{
+ PL190State *s = (PL190State *)opaque;
+
+ if (level)
+ s->level |= 1u << irq;
+ else
+ s->level &= ~(1u << irq);
+ pl190_update(s);
+}
+
+static void pl190_update_vectors(PL190State *s)
+{
+ uint32_t mask;
+ int i;
+ int n;
+
+ mask = 0;
+ for (i = 0; i < 16; i++)
+ {
+ s->prio_mask[i] = mask;
+ if (s->vect_control[i] & 0x20)
+ {
+ n = s->vect_control[i] & 0x1f;
+ mask |= 1 << n;
+ }
+ }
+ s->prio_mask[16] = mask;
+ pl190_update(s);
+}
+
+static uint64_t pl190_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ PL190State *s = (PL190State *)opaque;
+ int i;
+
+ if (offset >= 0xfe0 && offset < 0x1000) {
+ return pl190_id[(offset - 0xfe0) >> 2];
+ }
+ if (offset >= 0x100 && offset < 0x140) {
+ return s->vect_addr[(offset - 0x100) >> 2];
+ }
+ if (offset >= 0x200 && offset < 0x240) {
+ return s->vect_control[(offset - 0x200) >> 2];
+ }
+ switch (offset >> 2) {
+ case 0: /* IRQSTATUS */
+ return pl190_irq_level(s);
+ case 1: /* FIQSTATUS */
+ return (s->level | s->soft_level) & s->fiq_select;
+ case 2: /* RAWINTR */
+ return s->level | s->soft_level;
+ case 3: /* INTSELECT */
+ return s->fiq_select;
+ case 4: /* INTENABLE */
+ return s->irq_enable;
+ case 6: /* SOFTINT */
+ return s->soft_level;
+ case 8: /* PROTECTION */
+ return s->protected;
+ case 12: /* VECTADDR */
+ /* Read vector address at the start of an ISR. Increases the
+ * current priority level to that of the current interrupt.
+ *
+ * Since an enabled interrupt X at priority P causes prio_mask[Y]
+ * to have bit X set for all Y > P, this loop will stop with
+ * i == the priority of the highest priority set interrupt.
+ */
+ for (i = 0; i < s->priority; i++) {
+ if ((s->level | s->soft_level) & s->prio_mask[i + 1]) {
+ break;
+ }
+ }
+
+ /* Reading this value with no pending interrupts is undefined.
+ We return the default address. */
+ if (i == PL190_NUM_PRIO)
+ return s->vect_addr[16];
+ if (i < s->priority)
+ {
+ s->prev_prio[i] = s->priority;
+ s->priority = i;
+ pl190_update(s);
+ }
+ return s->vect_addr[s->priority];
+ case 13: /* DEFVECTADDR */
+ return s->vect_addr[16];
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "pl190_read: Bad offset %x\n", (int)offset);
+ return 0;
+ }
+}
+
+static void pl190_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ PL190State *s = (PL190State *)opaque;
+
+ if (offset >= 0x100 && offset < 0x140) {
+ s->vect_addr[(offset - 0x100) >> 2] = val;
+ pl190_update_vectors(s);
+ return;
+ }
+ if (offset >= 0x200 && offset < 0x240) {
+ s->vect_control[(offset - 0x200) >> 2] = val;
+ pl190_update_vectors(s);
+ return;
+ }
+ switch (offset >> 2) {
+ case 0: /* SELECT */
+ /* This is a read-only register, but Linux tries to write to it
+ anyway. Ignore the write. */
+ break;
+ case 3: /* INTSELECT */
+ s->fiq_select = val;
+ break;
+ case 4: /* INTENABLE */
+ s->irq_enable |= val;
+ break;
+ case 5: /* INTENCLEAR */
+ s->irq_enable &= ~val;
+ break;
+ case 6: /* SOFTINT */
+ s->soft_level |= val;
+ break;
+ case 7: /* SOFTINTCLEAR */
+ s->soft_level &= ~val;
+ break;
+ case 8: /* PROTECTION */
+ /* TODO: Protection (supervisor only access) is not implemented. */
+ s->protected = val & 1;
+ break;
+ case 12: /* VECTADDR */
+ /* Restore the previous priority level. The value written is
+ ignored. */
+ if (s->priority < PL190_NUM_PRIO)
+ s->priority = s->prev_prio[s->priority];
+ break;
+ case 13: /* DEFVECTADDR */
+ s->vect_addr[16] = val;
+ break;
+ case 0xc0: /* ITCR */
+ if (val) {
+ qemu_log_mask(LOG_UNIMP, "pl190: Test mode not implemented\n");
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "pl190_write: Bad offset %x\n", (int)offset);
+ return;
+ }
+ pl190_update(s);
+}
+
+static const MemoryRegionOps pl190_ops = {
+ .read = pl190_read,
+ .write = pl190_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void pl190_reset(DeviceState *d)
+{
+ PL190State *s = PL190(d);
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ s->vect_addr[i] = 0;
+ s->vect_control[i] = 0;
+ }
+ s->vect_addr[16] = 0;
+ s->prio_mask[17] = 0xffffffff;
+ s->priority = PL190_NUM_PRIO;
+ pl190_update_vectors(s);
+}
+
+static void pl190_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ PL190State *s = PL190(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ memory_region_init_io(&s->iomem, obj, &pl190_ops, s, "pl190", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+ qdev_init_gpio_in(dev, pl190_set_irq, 32);
+ sysbus_init_irq(sbd, &s->irq);
+ sysbus_init_irq(sbd, &s->fiq);
+}
+
+static const VMStateDescription vmstate_pl190 = {
+ .name = "pl190",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(level, PL190State),
+ VMSTATE_UINT32(soft_level, PL190State),
+ VMSTATE_UINT32(irq_enable, PL190State),
+ VMSTATE_UINT32(fiq_select, PL190State),
+ VMSTATE_UINT8_ARRAY(vect_control, PL190State, 16),
+ VMSTATE_UINT32_ARRAY(vect_addr, PL190State, PL190_NUM_PRIO),
+ VMSTATE_UINT32_ARRAY(prio_mask, PL190State, PL190_NUM_PRIO+1),
+ VMSTATE_INT32(protected, PL190State),
+ VMSTATE_INT32(priority, PL190State),
+ VMSTATE_INT32_ARRAY(prev_prio, PL190State, PL190_NUM_PRIO),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void pl190_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = pl190_reset;
+ dc->vmsd = &vmstate_pl190;
+}
+
+static const TypeInfo pl190_info = {
+ .name = TYPE_PL190,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PL190State),
+ .instance_init = pl190_init,
+ .class_init = pl190_class_init,
+};
+
+static void pl190_register_types(void)
+{
+ type_register_static(&pl190_info);
+}
+
+type_init(pl190_register_types)
diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
new file mode 100644
index 000000000..ad4348361
--- /dev/null
+++ b/hw/intc/pnv_xive.c
@@ -0,0 +1,1987 @@
+/*
+ * QEMU PowerPC XIVE interrupt controller model
+ *
+ * Copyright (c) 2017-2019, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/dma.h"
+#include "sysemu/reset.h"
+#include "monitor/monitor.h"
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_core.h"
+#include "hw/ppc/pnv_xscom.h"
+#include "hw/ppc/pnv_xive.h"
+#include "hw/ppc/xive_regs.h"
+#include "hw/qdev-properties.h"
+#include "hw/ppc/ppc.h"
+#include "trace.h"
+
+#include <libfdt.h>
+
+#include "pnv_xive_regs.h"
+
+#undef XIVE_DEBUG
+
+/*
+ * Virtual structures table (VST)
+ */
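+/* Each SBE byte holds the 2-bit PQ state of 4 interrupt sources */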
+#define SBE_PER_BYTE 4
+
+typedef struct XiveVstInfo {
+ const char *name;
+ uint32_t size;
+ uint32_t max_blocks;
+} XiveVstInfo;
+
+static const XiveVstInfo vst_infos[] = {
+ [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 },
+ [VST_TSEL_SBE] = { "SBE", 1, 16 },
+ [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
+ [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },
+
+ /*
+ * Interrupt fifo backing store table (not modeled):
+ *
+ * 0 - IPI,
+ * 1 - HWD,
+ * 2 - First escalate,
+ * 3 - Second escalate,
+ * 4 - Redistribution,
+ * 5 - IPI cascaded queue?
+ */
+ [VST_TSEL_IRQ] = { "IRQ", 1, 6 },
+};
+
+#define xive_error(xive, fmt, ...) \
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \
+ (xive)->chip->chip_id, ## __VA_ARGS__);
+
+/*
+ * QEMU version of the GETFIELD/SETFIELD macros
+ *
+ * TODO: It might be better to use the existing extract64() and
+ * deposit64() but this means that all the register definitions will
+ * change and become incompatible with the ones found in skiboot.
+ *
+ * Keep it as it is for now until we find a common ground.
+ */
+static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
+{
+ return (word & mask) >> ctz64(mask);
+}
+
+static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
+ uint64_t value)
+{
+ return (word & ~mask) | ((value << ctz64(mask)) & mask);
+}
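+
+/*
+ * For a contiguous mask, GETFIELD(mask, word) behaves like
+ * extract64(word, ctz64(mask), ctpop64(mask)), and SETFIELD is the
+ * matching deposit64() operation.
+ */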
+
+/*
+ * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
+ * field overrides the hardwired chip ID in the Powerbus operations
+ * and for CAM compares
+ */
+static uint8_t pnv_xive_block_id(PnvXive *xive)
+{
+ uint8_t blk = xive->chip->chip_id;
+ uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];
+
+ if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
+ blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
+ }
+
+ return blk;
+}
+
+/*
+ * Remote access to controllers. HW uses MMIOs. For now, a simple scan
+ * of the chips is good enough.
+ *
+ * TODO: Block scope support
+ */
+static PnvXive *pnv_xive_get_remote(uint8_t blk)
+{
+ PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
+ int i;
+
+ for (i = 0; i < pnv->num_chips; i++) {
+ Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
+ PnvXive *xive = &chip9->xive;
+
+ if (pnv_xive_block_id(xive) == blk) {
+ return xive;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * VST accessors for SBE, EAT, ENDT, NVT
+ *
+ * Indirect VST tables are arrays of VSDs pointing to a page (of same
+ * size). Each page is a direct VST table.
+ */
+
+#define XIVE_VSD_SIZE 8
+
+/* Indirect page size can be 4K, 64K, 2M, 16M. */
+static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
+{
+ return page_shift == 12 || page_shift == 16 ||
+ page_shift == 21 || page_shift == 24;
+}
+
+static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
+ uint64_t vsd, uint32_t idx)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
+ uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
+ uint32_t idx_max;
+
+ idx_max = vst_tsize / info->size - 1;
+ if (idx > idx_max) {
+#ifdef XIVE_DEBUG
+ xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
+ info->name, idx, idx_max);
+#endif
+ return 0;
+ }
+
+ return vst_addr + idx * info->size;
+}
+
+static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
+ uint64_t vsd, uint32_t idx)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t vsd_addr;
+ uint32_t vsd_idx;
+ uint32_t page_shift;
+ uint32_t vst_per_page;
+
+ /* Get the page size of the indirect table. */
+ vsd_addr = vsd & VSD_ADDRESS_MASK;
+ vsd = ldq_be_dma(&address_space_memory, vsd_addr);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+#ifdef XIVE_DEBUG
+ xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
+#endif
+ return 0;
+ }
+
+ page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
+
+ if (!pnv_xive_vst_page_size_allowed(page_shift)) {
+ xive_error(xive, "VST: invalid %s page shift %d", info->name,
+ page_shift);
+ return 0;
+ }
+
+ vst_per_page = (1ull << page_shift) / info->size;
+ vsd_idx = idx / vst_per_page;
+
+ /* Load the VSD we are looking for, if not already done */
+ if (vsd_idx) {
+ vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
+ vsd = ldq_be_dma(&address_space_memory, vsd_addr);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+#ifdef XIVE_DEBUG
+ xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
+#endif
+ return 0;
+ }
+
+ /*
+ * Check that the pages have a consistent size across the
+ * indirect table
+ */
+ if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
+ xive_error(xive, "VST: %s entry %x indirect page size differ !?",
+ info->name, idx);
+ return 0;
+ }
+ }
+
+ return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
+}
+
+static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
+ uint32_t idx)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t vsd;
+
+ if (blk >= info->max_blocks) {
+ xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
+ blk, info->name, idx);
+ return 0;
+ }
+
+ vsd = xive->vsds[type][blk];
+
+ /* Remote VST access */
+ if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
+ xive = pnv_xive_get_remote(blk);
+
+ return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
+ }
+
+ if (VSD_INDIRECT & vsd) {
+ return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
+ }
+
+ return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
+}
+
+static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
+ uint32_t idx, void *data)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
+
+ if (!addr) {
+ return -1;
+ }
+
+ cpu_physical_memory_read(addr, data, info->size);
+ return 0;
+}
+
+#define XIVE_VST_WORD_ALL -1
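+
+/*
+ * word_number selects a single 4-byte word of the entry to update;
+ * XIVE_VST_WORD_ALL writes back the whole entry.
+ */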
+
+static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
+ uint32_t idx, void *data, uint32_t word_number)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
+
+ if (!addr) {
+ return -1;
+ }
+
+ if (word_number == XIVE_VST_WORD_ALL) {
+ cpu_physical_memory_write(addr, data, info->size);
+ } else {
+ cpu_physical_memory_write(addr + word_number * 4,
+ data + word_number * 4, 4);
+ }
+ return 0;
+}
+
+static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ XiveEND *end)
+{
+ return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
+}
+
+static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ XiveEND *end, uint8_t word_number)
+{
+ return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
+ word_number);
+}
+
+static int pnv_xive_end_update(PnvXive *xive)
+{
+ uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
+ xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
+ uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
+ xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
+ int i;
+ uint64_t eqc_watch[4];
+
+ for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
+ eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
+ }
+
+ return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
+ XIVE_VST_WORD_ALL);
+}
+
+static void pnv_xive_end_cache_load(PnvXive *xive)
+{
+ uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
+ xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
+ uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
+ xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
+ uint64_t eqc_watch[4] = { 0 };
+ int i;
+
+ if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
+ xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
+ xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
+ }
+}
+
+static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ XiveNVT *nvt)
+{
+ return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
+}
+
+static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ XiveNVT *nvt, uint8_t word_number)
+{
+ return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
+ word_number);
+}
+
+static int pnv_xive_nvt_update(PnvXive *xive)
+{
+ uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
+ xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
+ uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
+ xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
+ int i;
+ uint64_t vpc_watch[8];
+
+ for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
+ vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
+ }
+
+ return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
+ XIVE_VST_WORD_ALL);
+}
+
+static void pnv_xive_nvt_cache_load(PnvXive *xive)
+{
+ uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
+ xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
+ uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
+ xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
+ uint64_t vpc_watch[8] = { 0 };
+ int i;
+
+ if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
+ xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
+ xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
+ }
+}
+
+static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ XiveEAS *eas)
+{
+ PnvXive *xive = PNV_XIVE(xrtr);
+
+ /*
+ * EAT lookups should be local to the IC
+ */
+ if (pnv_xive_block_id(xive) != blk) {
+ xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
+ return -1;
+ }
+
+ return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
+}
+
+/*
+ * One bit per thread ID. The first register, PC_THREAD_EN_REG0, covers
+ * cores 0-15 (normal) of the chip or 0-7 (fused). The second register
+ * covers cores 16-23 (normal) or 8-11 (fused).
+ */
+static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
+{
+ int pir = ppc_cpu_pir(cpu);
+ uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
+ uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
+ uint32_t bit = pir & 0x3f;
+
+ return xive->regs[reg >> 3] & PPC_BIT(bit);
+}
+
+static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, XiveTCTXMatch *match)
+{
+ PnvXive *xive = PNV_XIVE(xptr);
+ PnvChip *chip = xive->chip;
+ int count = 0;
+ int i, j;
+
+ for (i = 0; i < chip->nr_cores; i++) {
+ PnvCore *pc = chip->cores[i];
+ CPUCore *cc = CPU_CORE(pc);
+
+ for (j = 0; j < cc->nr_threads; j++) {
+ PowerPCCPU *cpu = pc->threads[j];
+ XiveTCTX *tctx;
+ int ring;
+
+ if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
+ continue;
+ }
+
+ tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
+
+ /*
+ * Check the thread context CAM lines and record matches.
+ */
+ ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
+ nvt_idx, cam_ignore, logic_serv);
+ /*
+ * Save the context and keep scanning to catch duplicates, which
+ * we don't support yet.
+ */
+ if (ring != -1) {
+ if (match->tctx) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
+ "thread context NVT %x/%x\n",
+ nvt_blk, nvt_idx);
+ return -1;
+ }
+
+ match->ring = ring;
+ match->tctx = tctx;
+ count++;
+ }
+ }
+ }
+
+ return count;
+}
+
+static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
+{
+ return pnv_xive_block_id(PNV_XIVE(xrtr));
+}
+
+/*
+ * The TIMA MMIO space is shared among the chips. To identify the
+ * chip from which the access is being done, we extract the chip ID
+ * from the PIR.
+ */
+static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
+{
+ int pir = ppc_cpu_pir(cpu);
+ XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
+ PnvXive *xive = PNV_XIVE(xptr);
+
+ if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
+ xive_error(xive, "IC: CPU %x is not enabled", pir);
+ }
+ return xive;
+}
+
+/*
+ * The internal sources (IPIs) of the interrupt controller have no
+ * knowledge of the XIVE chip on which they reside. Encode the block
+ * id in the source interrupt number before forwarding the source
+ * event notification to the Router. This is required on a multichip
+ * system.
+ */
+static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
+{
+ PnvXive *xive = PNV_XIVE(xn);
+ uint8_t blk = pnv_xive_block_id(xive);
+
+ xive_router_notify(xn, XIVE_EAS(blk, srcno));
+}
+
+/*
+ * XIVE helpers
+ */
+
+static uint64_t pnv_xive_vc_size(PnvXive *xive)
+{
+ return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
+}
+
+static uint64_t pnv_xive_edt_shift(PnvXive *xive)
+{
+ return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
+}
+
+static uint64_t pnv_xive_pc_size(PnvXive *xive)
+{
+ return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
+}
+
+static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
+{
+ uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
+ uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
+
+ return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
+}
+
+/*
+ * Compute the number of entries per indirect subpage.
+ */
+static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
+{
+ uint8_t blk = pnv_xive_block_id(xive);
+ uint64_t vsd = xive->vsds[type][blk];
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t vsd_addr;
+ uint32_t page_shift;
+
+ /* For direct tables, fake a valid value */
+ if (!(VSD_INDIRECT & vsd)) {
+ return 1;
+ }
+
+ /* Get the page size of the indirect table. */
+ vsd_addr = vsd & VSD_ADDRESS_MASK;
+ vsd = ldq_be_dma(&address_space_memory, vsd_addr);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+#ifdef XIVE_DEBUG
+ xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
+#endif
+ return 0;
+ }
+
+ page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
+
+ if (!pnv_xive_vst_page_size_allowed(page_shift)) {
+ xive_error(xive, "VST: invalid %s page shift %d", info->name,
+ page_shift);
+ return 0;
+ }
+
+ return (1ull << page_shift) / info->size;
+}
+
+/*
+ * EDT Table
+ *
+ * The Virtualization Controller MMIO region containing the IPI ESB
+ * pages and END ESB pages is sub-divided into "sets" which map
+ * portions of the VC region to the different ESB pages. It is
+ * configured at runtime through the EDT "Domain Table" to let the
+ * firmware decide how to split the VC address space between IPI ESB
+ * pages and END ESB pages.
+ */
+
+/*
+ * Computes the overall size of the IPI or the END ESB pages
+ */
+static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
+{
+ uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
+ uint64_t size = 0;
+ int i;
+
+ for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
+ uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
+
+ if (edt_type == type) {
+ size += edt_size;
+ }
+ }
+
+ return size;
+}
+
+/*
+ * Maps an offset of the VC region into the IPI or END region using the
+ * layout defined by the EDT "Domain Table"
+ */
+static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
+ uint64_t type)
+{
+ int i;
+ uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
+ uint64_t edt_offset = vc_offset;
+
+ for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
+ uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
+
+ if (edt_type != type) {
+ edt_offset -= edt_size;
+ }
+ }
+
+ return edt_offset;
+}
+
+static void pnv_xive_edt_resize(PnvXive *xive)
+{
+ uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
+ uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);
+
+ memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
+ memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);
+
+ memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
+ memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
+}
+
+/*
+ * XIVE Table configuration. Only EDT is supported.
+ */
+static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
+{
+ uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
+ uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
+ uint64_t *xive_table;
+ uint8_t max_index;
+
+ switch (tsel) {
+ case CQ_TAR_TSEL_BLK:
+ max_index = ARRAY_SIZE(xive->blk);
+ xive_table = xive->blk;
+ break;
+ case CQ_TAR_TSEL_MIG:
+ max_index = ARRAY_SIZE(xive->mig);
+ xive_table = xive->mig;
+ break;
+ case CQ_TAR_TSEL_EDT:
+ max_index = ARRAY_SIZE(xive->edt);
+ xive_table = xive->edt;
+ break;
+ case CQ_TAR_TSEL_VDT:
+ max_index = ARRAY_SIZE(xive->vdt);
+ xive_table = xive->vdt;
+ break;
+ default:
+ xive_error(xive, "IC: invalid table %d", (int) tsel);
+ return -1;
+ }
+
+ if (tsel_index >= max_index) {
+ xive_error(xive, "IC: invalid index %d", (int) tsel_index);
+ return -1;
+ }
+
+ xive_table[tsel_index] = val;
+
+ if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
+ xive->regs[CQ_TAR >> 3] =
+ SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
+ }
+
+ /*
+ * EDT configuration is complete. Resize the MMIO windows exposing
+ * the IPI and the END ESBs in the VC region.
+ */
+ if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
+ pnv_xive_edt_resize(xive);
+ }
+
+ return 0;
+}
+
+/*
+ * Virtual Structure Tables (VST) configuration
+ */
+static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
+ uint8_t blk, uint64_t vsd)
+{
+ XiveENDSource *end_xsrc = &xive->end_source;
+ XiveSource *xsrc = &xive->ipi_source;
+ const XiveVstInfo *info = &vst_infos[type];
+ uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
+ uint64_t vst_tsize = 1ull << page_shift;
+ uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
+
+ /* Basic checks */
+
+ if (VSD_INDIRECT & vsd) {
+ if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
+ xive_error(xive, "VST: %s indirect tables are not enabled",
+ info->name);
+ return;
+ }
+
+ if (!pnv_xive_vst_page_size_allowed(page_shift)) {
+ xive_error(xive, "VST: invalid %s page shift %d", info->name,
+ page_shift);
+ return;
+ }
+ }
+
+ if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
+ xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
+ " page shift %d", info->name, vst_addr, page_shift);
+ return;
+ }
+
+ /* Record the table configuration (in SRAM on HW) */
+ xive->vsds[type][blk] = vsd;
+
+ /* Now tune the models with the configuration provided by the FW */
+
+ switch (type) {
+ case VST_TSEL_IVT: /* Nothing to be done */
+ break;
+
+ case VST_TSEL_EQDT:
+ /*
+ * Backing store pages for the END.
+ *
+ * If the table is direct, we can compute the number of PQ
+ * entries provisioned by FW (such as skiboot) and resize the
+ * END ESB window accordingly.
+ */
+ if (!(VSD_INDIRECT & vsd)) {
+ memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
+ * (1ull << xsrc->esb_shift));
+ }
+ memory_region_add_subregion(&xive->end_edt_mmio, 0,
+ &end_xsrc->esb_mmio);
+ break;
+
+ case VST_TSEL_SBE:
+ /*
+ * Backing store pages for the source PQ bits. The model does
+ * not use these PQ bits backed in RAM because the XiveSource
+ * model has its own.
+ *
+ * If the table is direct, we can compute the number of PQ
+ * entries provisioned by FW (such as skiboot) and resize the
+ * ESB window accordingly.
+ */
+ if (!(VSD_INDIRECT & vsd)) {
+ memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
+ * (1ull << xsrc->esb_shift));
+ }
+ memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
+ break;
+
+ case VST_TSEL_VPDT: /* Not modeled */
+ case VST_TSEL_IRQ: /* Not modeled */
+ /*
+ * These tables contain the backing store pages for the
+ * interrupt FIFOs of the VC sub-engine in case of overflow.
+ */
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/*
+ * Both PC and VC sub-engines are configured, as each uses the Virtual
+ * Structure Tables: SBE, EAS, END and NVT.
+ */
+static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
+{
+ uint8_t mode = GETFIELD(VSD_MODE, vsd);
+ uint8_t type = GETFIELD(VST_TABLE_SELECT,
+ xive->regs[VC_VSD_TABLE_ADDR >> 3]);
+ uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
+ xive->regs[VC_VSD_TABLE_ADDR >> 3]);
+ uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
+
+ if (type > VST_TSEL_IRQ) {
+ xive_error(xive, "VST: invalid table type %d", type);
+ return;
+ }
+
+ if (blk >= vst_infos[type].max_blocks) {
+ xive_error(xive, "VST: invalid block id %d for"
+ " %s table", blk, vst_infos[type].name);
+ return;
+ }
+
+ /*
+ * Only take the VC sub-engine configuration into account because
+ * the XiveRouter model combines both VC and PC sub-engines
+ */
+ if (pc_engine) {
+ return;
+ }
+
+ if (!vst_addr) {
+ xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
+ return;
+ }
+
+ switch (mode) {
+ case VSD_MODE_FORWARD:
+ xive->vsds[type][blk] = vsd;
+ break;
+
+ case VSD_MODE_EXCLUSIVE:
+ pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
+ break;
+
+ default:
+ xive_error(xive, "VST: unsupported table mode %d", mode);
+ return;
+ }
+}
+
+/*
+ * Interrupt controller MMIO region. The layout is compatible between
+ * 4K and 64K pages :
+ *
+ * Page 0 sub-engine BARs
+ * 0x000 - 0x3FF IC registers
+ * 0x400 - 0x7FF PC registers
+ * 0x800 - 0xFFF VC registers
+ *
+ * Page 1 Notify page (writes only)
+ * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB)
+ * 0x800 - 0xFFF forwards and syncs
+ *
+ * Page 2 LSI Trigger page (writes only) (not modeled)
+ * Page 3 LSI SB EOI page (reads only) (not modeled)
+ *
+ * Page 4-7 indirect TIMA
+ */
+
+/*
+ * IC - registers MMIO
+ */
+static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+ MemoryRegion *sysmem = get_system_memory();
+ uint32_t reg = offset >> 3;
+ bool is_chip0 = xive->chip->chip_id == 0;
+
+ switch (offset) {
+
+ /*
+ * XIVE CQ (PowerBus bridge) settings
+ */
+ case CQ_MSGSND: /* msgsnd for doorbells */
+ case CQ_FIRMASK_OR: /* FIR error reporting */
+ break;
+ case CQ_PBI_CTL:
+ if (val & CQ_PBI_PC_64K) {
+ xive->pc_shift = 16;
+ }
+ if (val & CQ_PBI_VC_64K) {
+ xive->vc_shift = 16;
+ }
+ break;
+ case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
+ /*
+ * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
+ */
+ break;
+
+ /*
+ * XIVE Virtualization Controller settings
+ */
+ case VC_GLOBAL_CONFIG:
+ break;
+
+ /*
+ * XIVE Presenter Controller settings
+ */
+ case PC_GLOBAL_CONFIG:
+ /*
+ * PC_GCONF_CHIPID_OVR
+ * Overrides Int command Chip ID with the Chip ID field (DEBUG)
+ */
+ break;
+ case PC_TCTXT_CFG:
+ /*
+ * TODO: block group support
+ */
+ break;
+ case PC_TCTXT_TRACK:
+ /*
+ * PC_TCTXT_TRACK_EN:
+ * enable block tracking and exchange of block ownership
+ * information between Interrupt controllers
+ */
+ break;
+
+ /*
+ * Misc settings
+ */
+ case VC_SBC_CONFIG: /* Store EOI configuration */
+ /*
+ * Configure store EOI if required by firmware (skiboot has removed
+ * support recently though)
+ */
+ if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
+ xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
+ }
+ break;
+
+ case VC_EQC_CONFIG: /* TODO: silent escalation */
+ case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
+ break;
+
+ /*
+ * XIVE BAR settings (XSCOM only)
+ */
+ case CQ_RST_CTL:
+ /* bit4: resets all BAR registers */
+ break;
+
+ case CQ_IC_BAR: /* IC BAR. 8 pages */
+ xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
+ if (!(val & CQ_IC_BAR_VALID)) {
+ xive->ic_base = 0;
+ if (xive->regs[reg] & CQ_IC_BAR_VALID) {
+ memory_region_del_subregion(&xive->ic_mmio,
+ &xive->ic_reg_mmio);
+ memory_region_del_subregion(&xive->ic_mmio,
+ &xive->ic_notify_mmio);
+ memory_region_del_subregion(&xive->ic_mmio,
+ &xive->ic_lsi_mmio);
+ memory_region_del_subregion(&xive->ic_mmio,
+ &xive->tm_indirect_mmio);
+
+ memory_region_del_subregion(sysmem, &xive->ic_mmio);
+ }
+ } else {
+ xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
+ if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
+ memory_region_add_subregion(sysmem, xive->ic_base,
+ &xive->ic_mmio);
+
+ memory_region_add_subregion(&xive->ic_mmio, 0,
+ &xive->ic_reg_mmio);
+ memory_region_add_subregion(&xive->ic_mmio,
+ 1ul << xive->ic_shift,
+ &xive->ic_notify_mmio);
+ memory_region_add_subregion(&xive->ic_mmio,
+ 2ul << xive->ic_shift,
+ &xive->ic_lsi_mmio);
+ memory_region_add_subregion(&xive->ic_mmio,
+ 4ull << xive->ic_shift,
+ &xive->tm_indirect_mmio);
+ }
+ }
+ break;
+
+ case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
+ case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
+ xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
+ if (!(val & CQ_TM_BAR_VALID)) {
+ xive->tm_base = 0;
+ if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
+ memory_region_del_subregion(sysmem, &xive->tm_mmio);
+ }
+ } else {
+ xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
+ if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
+ memory_region_add_subregion(sysmem, xive->tm_base,
+ &xive->tm_mmio);
+ }
+ }
+ break;
+
+ case CQ_PC_BARM:
+ xive->regs[reg] = val;
+ memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
+ break;
+ case CQ_PC_BAR: /* From 32M to 512G */
+ if (!(val & CQ_PC_BAR_VALID)) {
+ xive->pc_base = 0;
+ if (xive->regs[reg] & CQ_PC_BAR_VALID) {
+ memory_region_del_subregion(sysmem, &xive->pc_mmio);
+ }
+ } else {
+ xive->pc_base = val & ~(CQ_PC_BAR_VALID);
+ if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
+ memory_region_add_subregion(sysmem, xive->pc_base,
+ &xive->pc_mmio);
+ }
+ }
+ break;
+
+ case CQ_VC_BARM:
+ xive->regs[reg] = val;
+ memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
+ break;
+ case CQ_VC_BAR: /* From 64M to 4TB */
+ if (!(val & CQ_VC_BAR_VALID)) {
+ xive->vc_base = 0;
+ if (xive->regs[reg] & CQ_VC_BAR_VALID) {
+ memory_region_del_subregion(sysmem, &xive->vc_mmio);
+ }
+ } else {
+ xive->vc_base = val & ~(CQ_VC_BAR_VALID);
+ if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
+ memory_region_add_subregion(sysmem, xive->vc_base,
+ &xive->vc_mmio);
+ }
+ }
+ break;
+
+ /*
+ * XIVE Table settings.
+ */
+ case CQ_TAR: /* Table Address */
+ break;
+ case CQ_TDR: /* Table Data */
+ pnv_xive_table_set_data(xive, val);
+ break;
+
+ /*
+ * XIVE VC & PC Virtual Structure Table settings
+ */
+ case VC_VSD_TABLE_ADDR:
+ case PC_VSD_TABLE_ADDR: /* Virtual table selector */
+ break;
+ case VC_VSD_TABLE_DATA: /* Virtual table setting */
+ case PC_VSD_TABLE_DATA:
+ pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
+ break;
+
+ /*
+ * Interrupt fifo overflow in memory backing store (Not modeled)
+ */
+ case VC_IRQ_CONFIG_IPI:
+ case VC_IRQ_CONFIG_HW:
+ case VC_IRQ_CONFIG_CASCADE1:
+ case VC_IRQ_CONFIG_CASCADE2:
+ case VC_IRQ_CONFIG_REDIST:
+ case VC_IRQ_CONFIG_IPI_CASC:
+ break;
+
+ /*
+ * XIVE hardware thread enablement
+ */
+ case PC_THREAD_EN_REG0: /* Physical Thread Enable */
+ case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
+ break;
+
+ case PC_THREAD_EN_REG0_SET:
+ xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
+ break;
+ case PC_THREAD_EN_REG1_SET:
+ xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
+ break;
+ case PC_THREAD_EN_REG0_CLR:
+ xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
+ break;
+ case PC_THREAD_EN_REG1_CLR:
+ xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
+ break;
+
+ /*
+ * Indirect TIMA access setup. Defines the PIR of the HW thread
+ * to use.
+ */
+ case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
+ break;
+
+ /*
+ * XIVE PC & VC cache updates for EAS, NVT and END
+ */
+ case VC_IVC_SCRUB_MASK:
+ case VC_IVC_SCRUB_TRIG:
+ break;
+
+ case VC_EQC_CWATCH_SPEC:
+ val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
+ break;
+ case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
+ break;
+ case VC_EQC_CWATCH_DAT0:
+ /* writing to DATA0 triggers the cache write */
+ xive->regs[reg] = val;
+ pnv_xive_end_update(xive);
+ break;
+ case VC_EQC_SCRUB_MASK:
+ case VC_EQC_SCRUB_TRIG:
+ /*
+ * The scrubbing registers flush the cache in RAM and can also
+ * invalidate.
+ */
+ break;
+
+ case PC_VPC_CWATCH_SPEC:
+ val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
+ break;
+ case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
+ break;
+ case PC_VPC_CWATCH_DAT0:
+ /* writing to DATA0 triggers the cache write */
+ xive->regs[reg] = val;
+ pnv_xive_nvt_update(xive);
+ break;
+ case PC_VPC_SCRUB_MASK:
+ case PC_VPC_SCRUB_TRIG:
+ /*
+ * The scrubbing registers flush the cache in RAM and can also
+ * invalidate.
+ */
+ break;
+
+
+ /*
+ * XIVE PC & VC cache invalidation
+ */
+ case PC_AT_KILL:
+ break;
+ case VC_AT_MACRO_KILL:
+ break;
+ case PC_AT_KILL_MASK:
+ case VC_AT_MACRO_KILL_MASK:
+ break;
+
+ default:
+ xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
+ return;
+ }
+
+ xive->regs[reg] = val;
+}
+
+static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+ uint64_t val = 0;
+ uint32_t reg = offset >> 3;
+
+ switch (offset) {
+ case CQ_CFG_PB_GEN:
+ case CQ_IC_BAR:
+ case CQ_TM1_BAR:
+ case CQ_TM2_BAR:
+ case CQ_PC_BAR:
+ case CQ_PC_BARM:
+ case CQ_VC_BAR:
+ case CQ_VC_BARM:
+ case CQ_TAR:
+ case CQ_TDR:
+ case CQ_PBI_CTL:
+
+ case PC_TCTXT_CFG:
+ case PC_TCTXT_TRACK:
+ case PC_TCTXT_INDIR0:
+ case PC_TCTXT_INDIR1:
+ case PC_TCTXT_INDIR2:
+ case PC_TCTXT_INDIR3:
+ case PC_GLOBAL_CONFIG:
+
+ case PC_VPC_SCRUB_MASK:
+
+ case VC_GLOBAL_CONFIG:
+ case VC_AIB_TX_ORDER_TAG2:
+
+ case VC_IRQ_CONFIG_IPI:
+ case VC_IRQ_CONFIG_HW:
+ case VC_IRQ_CONFIG_CASCADE1:
+ case VC_IRQ_CONFIG_CASCADE2:
+ case VC_IRQ_CONFIG_REDIST:
+ case VC_IRQ_CONFIG_IPI_CASC:
+
+ case VC_EQC_SCRUB_MASK:
+ case VC_IVC_SCRUB_MASK:
+ case VC_SBC_CONFIG:
+ case VC_AT_MACRO_KILL_MASK:
+ case VC_VSD_TABLE_ADDR:
+ case PC_VSD_TABLE_ADDR:
+ case VC_VSD_TABLE_DATA:
+ case PC_VSD_TABLE_DATA:
+ case PC_THREAD_EN_REG0:
+ case PC_THREAD_EN_REG1:
+ val = xive->regs[reg];
+ break;
+
+ /*
+ * XIVE hardware thread enablement
+ */
+ case PC_THREAD_EN_REG0_SET:
+ case PC_THREAD_EN_REG0_CLR:
+ val = xive->regs[PC_THREAD_EN_REG0 >> 3];
+ break;
+ case PC_THREAD_EN_REG1_SET:
+ case PC_THREAD_EN_REG1_CLR:
+ val = xive->regs[PC_THREAD_EN_REG1 >> 3];
+ break;
+
+ case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
+ val = 0xffffff0000000000;
+ break;
+
+ /*
+ * XIVE PC & VC cache updates for EAS, NVT and END
+ */
+ case VC_EQC_CWATCH_SPEC:
+ xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
+ val = xive->regs[reg];
+ break;
+ case VC_EQC_CWATCH_DAT0:
+ /*
+ * Load DATA registers from cache with data requested by the
+ * SPEC register
+ */
+ pnv_xive_end_cache_load(xive);
+ val = xive->regs[reg];
+ break;
+ case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
+ val = xive->regs[reg];
+ break;
+
+ case PC_VPC_CWATCH_SPEC:
+ xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
+ val = xive->regs[reg];
+ break;
+ case PC_VPC_CWATCH_DAT0:
+ /*
+ * Load DATA registers from cache with data requested by the
+ * SPEC register
+ */
+ pnv_xive_nvt_cache_load(xive);
+ val = xive->regs[reg];
+ break;
+ case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
+ val = xive->regs[reg];
+ break;
+
+ case PC_VPC_SCRUB_TRIG:
+ case VC_IVC_SCRUB_TRIG:
+ case VC_EQC_SCRUB_TRIG:
+ xive->regs[reg] &= ~VC_SCRUB_VALID;
+ val = xive->regs[reg];
+ break;
+
+ /*
+ * XIVE PC & VC cache invalidation
+ */
+ case PC_AT_KILL:
+ xive->regs[reg] &= ~PC_AT_KILL_VALID;
+ val = xive->regs[reg];
+ break;
+ case VC_AT_MACRO_KILL:
+ xive->regs[reg] &= ~VC_KILL_VALID;
+ val = xive->regs[reg];
+ break;
+
+ /*
+ * XIVE synchronisation
+ */
+ case VC_EQC_CONFIG:
+ val = VC_EQC_SYNC_MASK;
+ break;
+
+ default:
+ xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
+ }
+
+ return val;
+}
+
+static const MemoryRegionOps pnv_xive_ic_reg_ops = {
+ .read = pnv_xive_ic_reg_read,
+ .write = pnv_xive_ic_reg_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
+/*
+ * IC - Notify MMIO port page (write only)
+ */
+#define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */
+#define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */
+#define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */
+#define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */
+#define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */
+#define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */
+#define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */
+#define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */
+
+/* VC synchronisation */
+#define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */
+#define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */
+#define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */
+#define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */
+#define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */
+
+/* PC synchronisation */
+#define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */
+#define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */
+#define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */
+
+static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
+{
+ uint8_t blk;
+ uint32_t idx;
+
+ trace_pnv_xive_ic_hw_trigger(addr, val);
+
+ if (val & XIVE_TRIGGER_END) {
+ xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
+ addr, val);
+ return;
+ }
+
+ /*
+ * Forward the source event notification directly to the Router.
+ * The source interrupt number should already be correctly encoded
+ * with the chip block id by the sending device (PHB, PSI).
+ */
+ blk = XIVE_EAS_BLOCK(val);
+ idx = XIVE_EAS_INDEX(val);
+
+ xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
+}
+
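+/*
+ * For illustration (hypothetical values): a trigger value whose
+ * XIVE_EAS_BLOCK(val) is 1 and XIVE_EAS_INDEX(val) is 0x20 is handed
+ * to the router above as XIVE_EAS(1, 0x20), i.e. the chip block id
+ * and the source index travel together in the notification data.
+ */
+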
+static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+
+ /* VC: HW triggers */
+ switch (addr) {
+ case 0x000 ... 0x7FF:
+ pnv_xive_ic_hw_trigger(opaque, addr, val);
+ break;
+
+ /* VC: Forwarded IRQs */
+ case PNV_XIVE_FORWARD_IPI:
+ case PNV_XIVE_FORWARD_HW:
+ case PNV_XIVE_FORWARD_OS_ESC:
+ case PNV_XIVE_FORWARD_HW_ESC:
+ case PNV_XIVE_FORWARD_REDIS:
+ /* TODO: forwarded IRQs. Should be like HW triggers */
+ xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
+ addr, val);
+ break;
+
+ /* VC syncs */
+ case PNV_XIVE_SYNC_IPI:
+ case PNV_XIVE_SYNC_HW:
+ case PNV_XIVE_SYNC_OS_ESC:
+ case PNV_XIVE_SYNC_HW_ESC:
+ case PNV_XIVE_SYNC_REDIS:
+ break;
+
+ /* PC syncs */
+ case PNV_XIVE_SYNC_PULL:
+ case PNV_XIVE_SYNC_PUSH:
+ case PNV_XIVE_SYNC_VPC:
+ break;
+
+ default:
+ xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
+ }
+}
+
+static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+
+ /* loads are invalid */
+ xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
+ return -1;
+}
+
+static const MemoryRegionOps pnv_xive_ic_notify_ops = {
+ .read = pnv_xive_ic_notify_read,
+ .write = pnv_xive_ic_notify_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
+/*
+ * IC - LSI MMIO handlers (not modeled)
+ */
+
+static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+
+ xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
+}
+
+static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+
+ xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
+ return -1;
+}
+
+static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
+ .read = pnv_xive_ic_lsi_read,
+ .write = pnv_xive_ic_lsi_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
+/*
+ * IC - Indirect TIMA MMIO handlers
+ */
+
+/*
+ * When the TIMA is accessed from the indirect page, the thread id of
+ * the target CPU is configured in the PC_TCTXT_INDIR0 register before
+ * use. It is used for resets and also for debugging.
+ */
+static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
+{
+ PnvChip *chip = xive->chip;
+ uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
+ PowerPCCPU *cpu = NULL;
+ int pir;
+
+ if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
+ xive_error(xive, "IC: no indirect TIMA access in progress");
+ return NULL;
+ }
+
+ pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
+ cpu = pnv_chip_find_cpu(chip, pir);
+ if (!cpu) {
+ xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
+ return NULL;
+ }
+
+ /* Check that HW thread is XIVE enabled */
+ if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
+ xive_error(xive, "IC: CPU %x is not enabled", pir);
+ }
+
+ return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
+}
+
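+/*
+ * For example (hypothetical values): with chip_id 0x2 and a thread id
+ * of 0x15 latched in PC_TCTXT_INDIR0, the PIR computed above is
+ * (0x2 << 8) | 0x15 = 0x215, and the interrupt presenter (TCTX) of
+ * the matching vCPU is returned.
+ */
+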
+static void xive_tm_indirect_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
+
+ xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
+}
+
+static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
+
+ return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
+}
+
+static const MemoryRegionOps xive_tm_indirect_ops = {
+ .read = xive_tm_indirect_read,
+ .write = xive_tm_indirect_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+static void pnv_xive_tm_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
+ PnvXive *xive = pnv_xive_tm_get_xive(cpu);
+ XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
+
+ xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
+}
+
+static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
+ PnvXive *xive = pnv_xive_tm_get_xive(cpu);
+ XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
+
+ return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
+}
+
+const MemoryRegionOps pnv_xive_tm_ops = {
+ .read = pnv_xive_tm_read,
+ .write = pnv_xive_tm_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+/*
+ * Interrupt controller XSCOM region.
+ */
+static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
+{
+ switch (addr >> 3) {
+ case X_VC_EQC_CONFIG:
+ /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
+ return VC_EQC_SYNC_MASK;
+ default:
+ return pnv_xive_ic_reg_read(opaque, addr, size);
+ }
+}
+
+static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ pnv_xive_ic_reg_write(opaque, addr, val, size);
+}
+
+static const MemoryRegionOps pnv_xive_xscom_ops = {
+ .read = pnv_xive_xscom_read,
+ .write = pnv_xive_xscom_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ }
+};
+
+/*
+ * Virtualization Controller MMIO region containing the IPI and END ESB pages
+ */
+static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+ uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
+ uint64_t edt_type = 0;
+ uint64_t edt_offset;
+ MemTxResult result;
+ AddressSpace *edt_as = NULL;
+ uint64_t ret = -1;
+
+ if (edt_index < XIVE_TABLE_EDT_MAX) {
+ edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
+ }
+
+ switch (edt_type) {
+ case CQ_TDR_EDT_IPI:
+ edt_as = &xive->ipi_as;
+ break;
+ case CQ_TDR_EDT_EQ:
+ edt_as = &xive->end_as;
+ break;
+ default:
+ xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
+ return -1;
+ }
+
+ /* Remap the offset for the targeted address space */
+ edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
+
+ ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
+ &result);
+
+ if (result != MEMTX_OK) {
+ xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
+ HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
+ offset, edt_offset);
+ return -1;
+ }
+
+ return ret;
+}
+
+static void pnv_xive_vc_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+ uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
+ uint64_t edt_type = 0;
+ uint64_t edt_offset;
+ MemTxResult result;
+ AddressSpace *edt_as = NULL;
+
+ if (edt_index < XIVE_TABLE_EDT_MAX) {
+ edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
+ }
+
+ switch (edt_type) {
+ case CQ_TDR_EDT_IPI:
+ edt_as = &xive->ipi_as;
+ break;
+ case CQ_TDR_EDT_EQ:
+ edt_as = &xive->end_as;
+ break;
+ default:
+ xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
+ offset);
+ return;
+ }
+
+ /* Remap the offset for the targeted address space */
+ edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
+
+ address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
+ }
+}
+
+static const MemoryRegionOps pnv_xive_vc_ops = {
+ .read = pnv_xive_vc_read,
+ .write = pnv_xive_vc_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
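+/*
+ * In other words, an access at 'offset' in the VC window selects EDT
+ * entry (offset >> pnv_xive_edt_shift(xive)); depending on whether
+ * that entry is typed CQ_TDR_EDT_IPI or CQ_TDR_EDT_EQ, the access is
+ * replayed at the remapped offset in the IPI or END address space.
+ */
+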
+/*
+ * Presenter Controller MMIO region. The Virtualization Controller
+ * updates the IPB in the NVT table when required. Not modeled.
+ */
+static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+
+ xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
+ return -1;
+}
+
+static void pnv_xive_pc_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ PnvXive *xive = PNV_XIVE(opaque);
+
+ xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
+}
+
+static const MemoryRegionOps pnv_xive_pc_ops = {
+ .read = pnv_xive_pc_read,
+ .write = pnv_xive_pc_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
+static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
+ Monitor *mon)
+{
+ uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
+ uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);
+
+ if (!xive_nvt_is_valid(nvt)) {
+ return;
+ }
+
+ monitor_printf(mon, " %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
+ eq_blk, eq_idx,
+ xive_get_field32(NVT_W4_IPB, nvt->w4));
+}
+
+void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
+{
+ XiveRouter *xrtr = XIVE_ROUTER(xive);
+ uint8_t blk = pnv_xive_block_id(xive);
+ uint8_t chip_id = xive->chip->chip_id;
+ uint32_t srcno0 = XIVE_EAS(blk, 0);
+ uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
+ XiveEAS eas;
+ XiveEND end;
+ XiveNVT nvt;
+ int i;
+ uint64_t xive_nvt_per_subpage;
+
+ monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
+ srcno0, srcno0 + nr_ipis - 1);
+ xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
+
+ monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
+ srcno0, srcno0 + nr_ipis - 1);
+ for (i = 0; i < nr_ipis; i++) {
+ if (xive_router_get_eas(xrtr, blk, i, &eas)) {
+ break;
+ }
+ if (!xive_eas_is_masked(&eas)) {
+ xive_eas_pic_print_info(&eas, i, mon);
+ }
+ }
+
+ monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
+ i = 0;
+ while (!xive_router_get_end(xrtr, blk, i, &end)) {
+ xive_end_pic_print_info(&end, i++, mon);
+ }
+
+ monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
+ i = 0;
+ while (!xive_router_get_end(xrtr, blk, i, &end)) {
+ xive_end_eas_pic_print_info(&end, i++, mon);
+ }
+
+ monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
+ 0, XIVE_NVT_COUNT - 1);
+ xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
+ for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
+ while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
+ xive_nvt_pic_print_info(&nvt, i++, mon);
+ }
+ }
+}
+
+static void pnv_xive_reset(void *dev)
+{
+ PnvXive *xive = PNV_XIVE(dev);
+ XiveSource *xsrc = &xive->ipi_source;
+ XiveENDSource *end_xsrc = &xive->end_source;
+
+ /* Default page size (Should be changed at runtime to 64k) */
+ xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
+
+ /* Clear subregions */
+ if (memory_region_is_mapped(&xsrc->esb_mmio)) {
+ memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
+ }
+
+ if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
+ memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
+ }
+
+ if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
+ memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
+ }
+
+ if (memory_region_is_mapped(&xive->end_edt_mmio)) {
+ memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
+ }
+}
+
+static void pnv_xive_init(Object *obj)
+{
+ PnvXive *xive = PNV_XIVE(obj);
+
+ object_initialize_child(obj, "ipi_source", &xive->ipi_source,
+ TYPE_XIVE_SOURCE);
+ object_initialize_child(obj, "end_source", &xive->end_source,
+ TYPE_XIVE_END_SOURCE);
+}
+
+/*
+ * Maximum number of IRQs and ENDs supported by HW
+ */
+#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
+#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
+
+static void pnv_xive_realize(DeviceState *dev, Error **errp)
+{
+ PnvXive *xive = PNV_XIVE(dev);
+ PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
+ XiveSource *xsrc = &xive->ipi_source;
+ XiveENDSource *end_xsrc = &xive->end_source;
+ Error *local_err = NULL;
+
+ pxc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ assert(xive->chip);
+
+ /*
+ * The XiveSource and XiveENDSource objects are realized with the
+ * maximum allowed HW configuration. The ESB MMIO regions will be
+ * resized dynamically when the controller is configured by the FW
+ * to limit accesses to resources not provisioned.
+ */
+ object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
+ &error_fatal);
+ object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
+ if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
+ return;
+ }
+
+ object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
+ &error_fatal);
+ object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
+ &error_abort);
+ if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
+ return;
+ }
+
+ /* Default page size. Generally changed at runtime to 64k */
+ xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
+
+ /* XSCOM region, used for initial configuration of the BARs */
+ memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
+ xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
+
+ /* Interrupt controller MMIO regions */
+ memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
+ PNV9_XIVE_IC_SIZE);
+
+ memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
+ xive, "xive-ic-reg", 1 << xive->ic_shift);
+ memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
+ &pnv_xive_ic_notify_ops,
+ xive, "xive-ic-notify", 1 << xive->ic_shift);
+
+ /* The Pervasive LSI trigger and EOI pages (not modeled) */
+ memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
+ xive, "xive-ic-lsi", 2 << xive->ic_shift);
+
+ /* Thread Interrupt Management Area (Indirect) */
+ memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
+ &xive_tm_indirect_ops,
+ xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
+ /*
+ * Overall Virtualization Controller MMIO region containing the
+ * IPI ESB pages and END ESB pages. The layout is defined by the
+ * EDT "Domain table" and the accesses are dispatched using
+ * address spaces for each.
+ */
+ memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
+ "xive-vc", PNV9_XIVE_VC_SIZE);
+
+ memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
+ PNV9_XIVE_VC_SIZE);
+ address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
+ memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
+ PNV9_XIVE_VC_SIZE);
+ address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");
+
+ /*
+ * The MMIO windows exposing the IPI ESBs and the END ESBs in the
+ * VC region. Their size is configured by the FW in the EDT table.
+ */
+ memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
+ memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
+
+ /* Presenter Controller MMIO region (not modeled) */
+ memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
+ "xive-pc", PNV9_XIVE_PC_SIZE);
+
+ /* Thread Interrupt Management Area (Direct) */
+ memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
+ xive, "xive-tima", PNV9_XIVE_TM_SIZE);
+
+ qemu_register_reset(pnv_xive_reset, dev);
+}
+
+static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
+ int xscom_offset)
+{
+ const char compat[] = "ibm,power9-xive-x";
+ char *name;
+ int offset;
+ uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
+ uint32_t reg[] = {
+ cpu_to_be32(lpc_pcba),
+ cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
+ };
+
+ name = g_strdup_printf("xive@%x", lpc_pcba);
+ offset = fdt_add_subnode(fdt, xscom_offset, name);
+ _FDT(offset);
+ g_free(name);
+
+ _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
+ _FDT((fdt_setprop(fdt, offset, "compatible", compat,
+ sizeof(compat))));
+ return 0;
+}
+
+static Property pnv_xive_properties[] = {
+ DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
+ DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
+ DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
+ DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
+ /* The PnvChip id identifies the XIVE interrupt controller. */
+ DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pnv_xive_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
+ XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
+ XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
+ XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
+ PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);
+
+ xdc->dt_xscom = pnv_xive_dt_xscom;
+
+ dc->desc = "PowerNV XIVE Interrupt Controller";
+ device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize);
+ device_class_set_props(dc, pnv_xive_properties);
+
+ xrc->get_eas = pnv_xive_get_eas;
+ xrc->get_end = pnv_xive_get_end;
+ xrc->write_end = pnv_xive_write_end;
+ xrc->get_nvt = pnv_xive_get_nvt;
+ xrc->write_nvt = pnv_xive_write_nvt;
+ xrc->get_block_id = pnv_xive_get_block_id;
+
+ xnc->notify = pnv_xive_notify;
+ xpc->match_nvt = pnv_xive_match_nvt;
+}
+
+static const TypeInfo pnv_xive_info = {
+ .name = TYPE_PNV_XIVE,
+ .parent = TYPE_XIVE_ROUTER,
+ .instance_init = pnv_xive_init,
+ .instance_size = sizeof(PnvXive),
+ .class_init = pnv_xive_class_init,
+ .class_size = sizeof(PnvXiveClass),
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_PNV_XSCOM_INTERFACE },
+ { }
+ }
+};
+
+static void pnv_xive_register_types(void)
+{
+ type_register_static(&pnv_xive_info);
+}
+
+type_init(pnv_xive_register_types)
diff --git a/hw/intc/pnv_xive_regs.h b/hw/intc/pnv_xive_regs.h
new file mode 100644
index 000000000..c78f030c0
--- /dev/null
+++ b/hw/intc/pnv_xive_regs.h
@@ -0,0 +1,248 @@
+/*
+ * QEMU PowerPC XIVE interrupt controller model
+ *
+ * Copyright (c) 2017-2018, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PPC_PNV_XIVE_REGS_H
+#define PPC_PNV_XIVE_REGS_H
+
+/* IC register offsets 0x0 - 0x400 */
+#define CQ_SWI_CMD_HIST 0x020
+#define CQ_SWI_CMD_POLL 0x028
+#define CQ_SWI_CMD_BCAST 0x030
+#define CQ_SWI_CMD_ASSIGN 0x038
+#define CQ_SWI_CMD_BLK_UPD 0x040
+#define CQ_SWI_RSP 0x048
+#define CQ_CFG_PB_GEN 0x050
+#define CQ_INT_ADDR_OPT PPC_BITMASK(14, 15)
+#define CQ_MSGSND 0x058
+#define CQ_CNPM_SEL 0x078
+#define CQ_IC_BAR 0x080
+#define CQ_IC_BAR_VALID PPC_BIT(0)
+#define CQ_IC_BAR_64K PPC_BIT(1)
+#define CQ_TM1_BAR 0x90
+#define CQ_TM2_BAR 0x0a0
+#define CQ_TM_BAR_VALID PPC_BIT(0)
+#define CQ_TM_BAR_64K PPC_BIT(1)
+#define CQ_PC_BAR 0x0b0
+#define CQ_PC_BAR_VALID PPC_BIT(0)
+#define CQ_PC_BARM 0x0b8
+#define CQ_PC_BARM_MASK PPC_BITMASK(26, 38)
+#define CQ_VC_BAR 0x0c0
+#define CQ_VC_BAR_VALID PPC_BIT(0)
+#define CQ_VC_BARM 0x0c8
+#define CQ_VC_BARM_MASK PPC_BITMASK(21, 37)
+#define CQ_TAR 0x0f0
+#define CQ_TAR_TBL_AUTOINC PPC_BIT(0)
+#define CQ_TAR_TSEL PPC_BITMASK(12, 15)
+#define CQ_TAR_TSEL_BLK PPC_BIT(12)
+#define CQ_TAR_TSEL_MIG PPC_BIT(13)
+#define CQ_TAR_TSEL_VDT PPC_BIT(14)
+#define CQ_TAR_TSEL_EDT PPC_BIT(15)
+#define CQ_TAR_TSEL_INDEX PPC_BITMASK(26, 31)
+#define CQ_TDR 0x0f8
+#define CQ_TDR_VDT_VALID PPC_BIT(0)
+#define CQ_TDR_VDT_BLK PPC_BITMASK(11, 15)
+#define CQ_TDR_VDT_INDEX PPC_BITMASK(28, 31)
+#define CQ_TDR_EDT_TYPE PPC_BITMASK(0, 1)
+#define CQ_TDR_EDT_INVALID 0
+#define CQ_TDR_EDT_IPI 1
+#define CQ_TDR_EDT_EQ 2
+#define CQ_TDR_EDT_BLK PPC_BITMASK(12, 15)
+#define CQ_TDR_EDT_INDEX PPC_BITMASK(26, 31)
+#define CQ_PBI_CTL 0x100
+#define CQ_PBI_PC_64K PPC_BIT(5)
+#define CQ_PBI_VC_64K PPC_BIT(6)
+#define CQ_PBI_LNX_TRIG PPC_BIT(7)
+#define CQ_PBI_FORCE_TM_LOCAL PPC_BIT(22)
+#define CQ_PBO_CTL 0x108
+#define CQ_AIB_CTL 0x110
+#define CQ_RST_CTL 0x118
+#define CQ_FIRMASK 0x198
+#define CQ_FIRMASK_AND 0x1a0
+#define CQ_FIRMASK_OR 0x1a8
+
+/* PC LBS1 register offsets 0x400 - 0x800 */
+#define PC_TCTXT_CFG 0x400
+#define PC_TCTXT_CFG_BLKGRP_EN PPC_BIT(0)
+#define PC_TCTXT_CFG_TARGET_EN PPC_BIT(1)
+#define PC_TCTXT_CFG_LGS_EN PPC_BIT(2)
+#define PC_TCTXT_CFG_STORE_ACK PPC_BIT(3)
+#define PC_TCTXT_CFG_HARD_CHIPID_BLK PPC_BIT(8)
+#define PC_TCTXT_CHIPID_OVERRIDE PPC_BIT(9)
+#define PC_TCTXT_CHIPID PPC_BITMASK(12, 15)
+#define PC_TCTXT_INIT_AGE PPC_BITMASK(30, 31)
+#define PC_TCTXT_TRACK 0x408
+#define PC_TCTXT_TRACK_EN PPC_BIT(0)
+#define PC_TCTXT_INDIR0 0x420
+#define PC_TCTXT_INDIR_VALID PPC_BIT(0)
+#define PC_TCTXT_INDIR_THRDID PPC_BITMASK(9, 15)
+#define PC_TCTXT_INDIR1 0x428
+#define PC_TCTXT_INDIR2 0x430
+#define PC_TCTXT_INDIR3 0x438
+#define PC_THREAD_EN_REG0 0x440
+#define PC_THREAD_EN_REG0_SET 0x448
+#define PC_THREAD_EN_REG0_CLR 0x450
+#define PC_THREAD_EN_REG1 0x460
+#define PC_THREAD_EN_REG1_SET 0x468
+#define PC_THREAD_EN_REG1_CLR 0x470
+#define PC_GLOBAL_CONFIG 0x480
+#define PC_GCONF_INDIRECT PPC_BIT(32)
+#define PC_GCONF_CHIPID_OVR PPC_BIT(40)
+#define PC_GCONF_CHIPID PPC_BITMASK(44, 47)
+#define PC_VSD_TABLE_ADDR 0x488
+#define PC_VSD_TABLE_DATA 0x490
+#define PC_AT_KILL 0x4b0
+#define PC_AT_KILL_VALID PPC_BIT(0)
+#define PC_AT_KILL_BLOCK_ID PPC_BITMASK(27, 31)
+#define PC_AT_KILL_OFFSET PPC_BITMASK(48, 60)
+#define PC_AT_KILL_MASK 0x4b8
+
+/* PC LBS2 register offsets */
+#define PC_VPC_CACHE_ENABLE 0x708
+#define PC_VPC_CACHE_EN_MASK PPC_BITMASK(0, 31)
+#define PC_VPC_SCRUB_TRIG 0x710
+#define PC_VPC_SCRUB_MASK 0x718
+#define PC_SCRUB_VALID PPC_BIT(0)
+#define PC_SCRUB_WANT_DISABLE PPC_BIT(1)
+#define PC_SCRUB_WANT_INVAL PPC_BIT(2)
+#define PC_SCRUB_BLOCK_ID PPC_BITMASK(27, 31)
+#define PC_SCRUB_OFFSET PPC_BITMASK(45, 63)
+#define PC_VPC_CWATCH_SPEC 0x738
+#define PC_VPC_CWATCH_CONFLICT PPC_BIT(0)
+#define PC_VPC_CWATCH_FULL PPC_BIT(8)
+#define PC_VPC_CWATCH_BLOCKID PPC_BITMASK(27, 31)
+#define PC_VPC_CWATCH_OFFSET PPC_BITMASK(45, 63)
+#define PC_VPC_CWATCH_DAT0 0x740
+#define PC_VPC_CWATCH_DAT1 0x748
+#define PC_VPC_CWATCH_DAT2 0x750
+#define PC_VPC_CWATCH_DAT3 0x758
+#define PC_VPC_CWATCH_DAT4 0x760
+#define PC_VPC_CWATCH_DAT5 0x768
+#define PC_VPC_CWATCH_DAT6 0x770
+#define PC_VPC_CWATCH_DAT7 0x778
+
+/* VC0 register offsets 0x800 - 0xFFF */
+#define VC_GLOBAL_CONFIG 0x800
+#define VC_GCONF_INDIRECT PPC_BIT(32)
+#define VC_VSD_TABLE_ADDR 0x808
+#define VC_VSD_TABLE_DATA 0x810
+#define VC_IVE_ISB_BLOCK_MODE 0x818
+#define VC_EQD_BLOCK_MODE 0x820
+#define VC_VPS_BLOCK_MODE 0x828
+#define VC_IRQ_CONFIG_IPI 0x840
+#define VC_IRQ_CONFIG_MEMB_EN PPC_BIT(45)
+#define VC_IRQ_CONFIG_MEMB_SZ PPC_BITMASK(46, 51)
+#define VC_IRQ_CONFIG_HW 0x848
+#define VC_IRQ_CONFIG_CASCADE1 0x850
+#define VC_IRQ_CONFIG_CASCADE2 0x858
+#define VC_IRQ_CONFIG_REDIST 0x860
+#define VC_IRQ_CONFIG_IPI_CASC 0x868
+#define VC_AIB_TX_ORDER_TAG2_REL_TF PPC_BIT(20)
+#define VC_AIB_TX_ORDER_TAG2 0x890
+#define VC_AT_MACRO_KILL 0x8b0
+#define VC_AT_MACRO_KILL_MASK 0x8b8
+#define VC_KILL_VALID PPC_BIT(0)
+#define VC_KILL_TYPE PPC_BITMASK(14, 15)
+#define VC_KILL_IRQ 0
+#define VC_KILL_IVC 1
+#define VC_KILL_SBC 2
+#define VC_KILL_EQD 3
+#define VC_KILL_BLOCK_ID PPC_BITMASK(27, 31)
+#define VC_KILL_OFFSET PPC_BITMASK(48, 60)
+#define VC_EQC_CACHE_ENABLE 0x908
+#define VC_EQC_CACHE_EN_MASK PPC_BITMASK(0, 15)
+#define VC_EQC_SCRUB_TRIG 0x910
+#define VC_EQC_SCRUB_MASK 0x918
+#define VC_EQC_CONFIG 0x920
+#define X_VC_EQC_CONFIG 0x214 /* XSCOM register */
+#define VC_EQC_CONF_SYNC_IPI PPC_BIT(32)
+#define VC_EQC_CONF_SYNC_HW PPC_BIT(33)
+#define VC_EQC_CONF_SYNC_ESC1 PPC_BIT(34)
+#define VC_EQC_CONF_SYNC_ESC2 PPC_BIT(35)
+#define VC_EQC_CONF_SYNC_REDI PPC_BIT(36)
+#define VC_EQC_CONF_EQP_INTERLEAVE PPC_BIT(38)
+#define VC_EQC_CONF_ENABLE_END_s_BIT PPC_BIT(39)
+#define VC_EQC_CONF_ENABLE_END_u_BIT PPC_BIT(40)
+#define VC_EQC_CONF_ENABLE_END_c_BIT PPC_BIT(41)
+#define VC_EQC_CONF_ENABLE_MORE_QSZ PPC_BIT(42)
+#define VC_EQC_CONF_SKIP_ESCALATE PPC_BIT(43)
+#define VC_EQC_CWATCH_SPEC 0x928
+#define VC_EQC_CWATCH_CONFLICT PPC_BIT(0)
+#define VC_EQC_CWATCH_FULL PPC_BIT(8)
+#define VC_EQC_CWATCH_BLOCKID PPC_BITMASK(28, 31)
+#define VC_EQC_CWATCH_OFFSET PPC_BITMASK(40, 63)
+#define VC_EQC_CWATCH_DAT0 0x930
+#define VC_EQC_CWATCH_DAT1 0x938
+#define VC_EQC_CWATCH_DAT2 0x940
+#define VC_EQC_CWATCH_DAT3 0x948
+#define VC_IVC_SCRUB_TRIG 0x990
+#define VC_IVC_SCRUB_MASK 0x998
+#define VC_SBC_SCRUB_TRIG 0xa10
+#define VC_SBC_SCRUB_MASK 0xa18
+#define VC_SCRUB_VALID PPC_BIT(0)
+#define VC_SCRUB_WANT_DISABLE PPC_BIT(1)
+#define VC_SCRUB_WANT_INVAL PPC_BIT(2) /* EQC and SBC only */
+#define VC_SCRUB_BLOCK_ID PPC_BITMASK(28, 31)
+#define VC_SCRUB_OFFSET PPC_BITMASK(40, 63)
+#define VC_IVC_CACHE_ENABLE 0x988
+#define VC_IVC_CACHE_EN_MASK PPC_BITMASK(0, 15)
+#define VC_SBC_CACHE_ENABLE 0xa08
+#define VC_SBC_CACHE_EN_MASK PPC_BITMASK(0, 15)
+#define VC_IVC_CACHE_SCRUB_TRIG 0x990
+#define VC_IVC_CACHE_SCRUB_MASK 0x998
+#define VC_SBC_CACHE_SCRUB_TRIG 0xa10
+#define VC_SBC_CACHE_SCRUB_MASK 0xa18
+#define VC_SBC_CONFIG 0xa20
+#define VC_SBC_CONF_CPLX_CIST PPC_BIT(44)
+#define VC_SBC_CONF_CIST_BOTH PPC_BIT(45)
+#define VC_SBC_CONF_NO_UPD_PRF PPC_BIT(59)
+
+/* VC1 register offsets */
+
+/* VSD Table address register definitions (shared) */
+#define VST_ADDR_AUTOINC PPC_BIT(0)
+#define VST_TABLE_SELECT PPC_BITMASK(13, 15)
+#define VST_TSEL_IVT 0
+#define VST_TSEL_SBE 1
+#define VST_TSEL_EQDT 2
+#define VST_TSEL_VPDT 3
+#define VST_TSEL_IRQ 4 /* VC only */
+#define VST_TABLE_BLOCK PPC_BITMASK(27, 31)
+
+/* Number of queue overflow pages */
+#define VC_QUEUE_OVF_COUNT 6
+
+/*
+ * Bits in a VSD entry.
+ *
+ * Note: the address is naturally aligned, we don't use a PPC_BITMASK,
+ * but just a mask to apply to the address before OR'ing it in.
+ *
+ * Note: VSD_FIRMWARE is a SW bit ! It hijacks an unused bit in the
+ * VSD and is only meant to be used in indirect mode !
+ */
+#define VSD_MODE PPC_BITMASK(0, 1)
+#define VSD_MODE_SHARED 1
+#define VSD_MODE_EXCLUSIVE 2
+#define VSD_MODE_FORWARD 3
+#define VSD_ADDRESS_MASK 0x0ffffffffffff000ull
+#define VSD_MIGRATION_REG PPC_BITMASK(52, 55)
+#define VSD_INDIRECT PPC_BIT(56)
+#define VSD_TSIZE PPC_BITMASK(59, 63)
+#define VSD_FIRMWARE PPC_BIT(2) /* Read warning above */
+
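+/*
+ * For illustration: the backing table address of a VSD is recovered
+ * as (vsd & VSD_ADDRESS_MASK); a set VSD_INDIRECT bit marks the entry
+ * as pointing to a page of further VSDs, which is the only case where
+ * the software-defined VSD_FIRMWARE bit is meaningful (see above).
+ */
+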
+#define VC_EQC_SYNC_MASK \
+ (VC_EQC_CONF_SYNC_IPI | \
+ VC_EQC_CONF_SYNC_HW | \
+ VC_EQC_CONF_SYNC_ESC1 | \
+ VC_EQC_CONF_SYNC_ESC2 | \
+ VC_EQC_CONF_SYNC_REDI)
+
+
+#endif /* PPC_PNV_XIVE_REGS_H */
diff --git a/hw/intc/ppc-uic.c b/hw/intc/ppc-uic.c
new file mode 100644
index 000000000..60013f2dd
--- /dev/null
+++ b/hw/intc/ppc-uic.c
@@ -0,0 +1,321 @@
+/*
+ * "Universal" Interrupt Controller for PowerPPC 4xx embedded processors
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/ppc-uic.h"
+#include "hw/irq.h"
+#include "cpu.h"
+#include "hw/ppc/ppc.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+
+enum {
+ DCR_UICSR = 0x000,
+ DCR_UICSRS = 0x001,
+ DCR_UICER = 0x002,
+ DCR_UICCR = 0x003,
+ DCR_UICPR = 0x004,
+ DCR_UICTR = 0x005,
+ DCR_UICMSR = 0x006,
+ DCR_UICVR = 0x007,
+ DCR_UICVCR = 0x008,
+ DCR_UICMAX = 0x009,
+};
+
+/*#define DEBUG_UIC*/
+
+#ifdef DEBUG_UIC
+# define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
+#else
+# define LOG_UIC(...) do { } while (0)
+#endif
+
+static void ppcuic_trigger_irq(PPCUIC *uic)
+{
+ uint32_t ir, cr;
+ int start, end, inc, i;
+
+ /* Trigger interrupt if any is pending */
+ ir = uic->uicsr & uic->uicer & (~uic->uiccr);
+ cr = uic->uicsr & uic->uicer & uic->uiccr;
+ LOG_UIC("%s: uicsr %08" PRIx32 " uicer %08" PRIx32
+ " uiccr %08" PRIx32 "\n"
+ " %08" PRIx32 " ir %08" PRIx32 " cr %08" PRIx32 "\n",
+ __func__, uic->uicsr, uic->uicer, uic->uiccr,
+ uic->uicsr & uic->uicer, ir, cr);
+ if (ir != 0x00000000) {
+ LOG_UIC("Raise UIC interrupt\n");
+ qemu_irq_raise(uic->output_int);
+ } else {
+ LOG_UIC("Lower UIC interrupt\n");
+ qemu_irq_lower(uic->output_int);
+ }
+ /* Trigger critical interrupt if any is pending and update vector */
+ if (cr != 0x00000000) {
+ qemu_irq_raise(uic->output_cint);
+ if (uic->use_vectors) {
+ /* Compute critical IRQ vector */
+ if (uic->uicvcr & 1) {
+ start = 31;
+ end = 0;
+ inc = -1;
+ } else {
+ start = 0;
+ end = 31;
+ inc = 1;
+ }
+ uic->uicvr = uic->uicvcr & 0xFFFFFFFC;
+ for (i = start; i <= end; i += inc) {
+ if (cr & (1 << i)) {
+ uic->uicvr += (i - start) * 512 * inc;
+ break;
+ }
+ }
+ }
+ LOG_UIC("Raise UIC critical interrupt - "
+ "vector %08" PRIx32 "\n", uic->uicvr);
+ } else {
+ LOG_UIC("Lower UIC critical interrupt\n");
+ qemu_irq_lower(uic->output_cint);
+ uic->uicvr = 0x00000000;
+ }
+}
+
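+/*
+ * Worked example for the critical vector above (hypothetical values):
+ * with uicvcr = 0x00000100 (bit 0 clear, so the scan runs upwards
+ * from bit 0) and bit 3 the lowest bit set in cr, the vector becomes
+ * (0x100 & 0xFFFFFFFC) + 3 * 512 = 0x700.
+ */
+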
+static void ppcuic_set_irq(void *opaque, int irq_num, int level)
+{
+ PPCUIC *uic;
+ uint32_t mask, sr;
+
+ uic = opaque;
+ mask = 1U << (31 - irq_num);
+ LOG_UIC("%s: irq %d level %d uicsr %08" PRIx32
+ " mask %08" PRIx32 " => %08" PRIx32 " %08" PRIx32 "\n",
+ __func__, irq_num, level,
+ uic->uicsr, mask, uic->uicsr & mask, level << irq_num);
+ if (irq_num < 0 || irq_num > 31) {
+ return;
+ }
+ sr = uic->uicsr;
+
+ /* Update status register */
+ if (uic->uictr & mask) {
+ /* Edge sensitive interrupt */
+ if (level == 1) {
+ uic->uicsr |= mask;
+ }
+ } else {
+ /* Level sensitive interrupt */
+ if (level == 1) {
+ uic->uicsr |= mask;
+ uic->level |= mask;
+ } else {
+ uic->uicsr &= ~mask;
+ uic->level &= ~mask;
+ }
+ }
+ LOG_UIC("%s: irq %d level %d sr %" PRIx32 " => "
+ "%08" PRIx32 "\n", __func__, irq_num, level, uic->uicsr, sr);
+ if (sr != uic->uicsr) {
+ ppcuic_trigger_irq(uic);
+ }
+}
+
+static uint32_t dcr_read_uic(void *opaque, int dcrn)
+{
+ PPCUIC *uic;
+ uint32_t ret;
+
+ uic = opaque;
+ dcrn -= uic->dcr_base;
+ switch (dcrn) {
+ case DCR_UICSR:
+ case DCR_UICSRS:
+ ret = uic->uicsr;
+ break;
+ case DCR_UICER:
+ ret = uic->uicer;
+ break;
+ case DCR_UICCR:
+ ret = uic->uiccr;
+ break;
+ case DCR_UICPR:
+ ret = uic->uicpr;
+ break;
+ case DCR_UICTR:
+ ret = uic->uictr;
+ break;
+ case DCR_UICMSR:
+ ret = uic->uicsr & uic->uicer;
+ break;
+ case DCR_UICVR:
+ if (!uic->use_vectors) {
+ goto no_read;
+ }
+ ret = uic->uicvr;
+ break;
+ case DCR_UICVCR:
+ if (!uic->use_vectors) {
+ goto no_read;
+ }
+ ret = uic->uicvcr;
+ break;
+ default:
+ no_read:
+ ret = 0x00000000;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_uic(void *opaque, int dcrn, uint32_t val)
+{
+ PPCUIC *uic;
+
+ uic = opaque;
+ dcrn -= uic->dcr_base;
+ LOG_UIC("%s: dcr %d val 0x%x\n", __func__, dcrn, val);
+ switch (dcrn) {
+ case DCR_UICSR:
+ uic->uicsr &= ~val;
+ uic->uicsr |= uic->level;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICSRS:
+ uic->uicsr |= val;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICER:
+ uic->uicer = val;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICCR:
+ uic->uiccr = val;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICPR:
+ uic->uicpr = val;
+ break;
+ case DCR_UICTR:
+ uic->uictr = val;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICMSR:
+ break;
+ case DCR_UICVR:
+ break;
+ case DCR_UICVCR:
+ uic->uicvcr = val & 0xFFFFFFFD;
+ ppcuic_trigger_irq(uic);
+ break;
+ }
+}
+
+static void ppc_uic_reset(DeviceState *dev)
+{
+ PPCUIC *uic = PPC_UIC(dev);
+
+ uic->uiccr = 0x00000000;
+ uic->uicer = 0x00000000;
+ uic->uicpr = 0x00000000;
+ uic->uicsr = 0x00000000;
+ uic->uictr = 0x00000000;
+ if (uic->use_vectors) {
+ uic->uicvcr = 0x00000000;
+ uic->uicvr = 0x00000000;
+ }
+}
+
+static void ppc_uic_realize(DeviceState *dev, Error **errp)
+{
+ PPCUIC *uic = PPC_UIC(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ PowerPCCPU *cpu;
+ int i;
+
+ if (!uic->cpu) {
+ /* This is a programming error in the code using this device */
+ error_setg(errp, "ppc-uic 'cpu' link property was not set");
+ return;
+ }
+
+ cpu = POWERPC_CPU(uic->cpu);
+ for (i = 0; i < DCR_UICMAX; i++) {
+ ppc_dcr_register(&cpu->env, uic->dcr_base + i, uic,
+ &dcr_read_uic, &dcr_write_uic);
+ }
+
+ sysbus_init_irq(sbd, &uic->output_int);
+ sysbus_init_irq(sbd, &uic->output_cint);
+ qdev_init_gpio_in(dev, ppcuic_set_irq, UIC_MAX_IRQ);
+}
+
+static Property ppc_uic_properties[] = {
+ DEFINE_PROP_LINK("cpu", PPCUIC, cpu, TYPE_CPU, CPUState *),
+ DEFINE_PROP_UINT32("dcr-base", PPCUIC, dcr_base, 0xc0),
+ DEFINE_PROP_BOOL("use-vectors", PPCUIC, use_vectors, true),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static const VMStateDescription ppc_uic_vmstate = {
+ .name = "ppc-uic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(level, PPCUIC),
+ VMSTATE_UINT32(uicsr, PPCUIC),
+ VMSTATE_UINT32(uicer, PPCUIC),
+ VMSTATE_UINT32(uiccr, PPCUIC),
+ VMSTATE_UINT32(uicpr, PPCUIC),
+ VMSTATE_UINT32(uictr, PPCUIC),
+ VMSTATE_UINT32(uicvcr, PPCUIC),
+ VMSTATE_UINT32(uicvr, PPCUIC),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void ppc_uic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = ppc_uic_reset;
+ dc->realize = ppc_uic_realize;
+ dc->vmsd = &ppc_uic_vmstate;
+ device_class_set_props(dc, ppc_uic_properties);
+}
+
+static const TypeInfo ppc_uic_info = {
+ .name = TYPE_PPC_UIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PPCUIC),
+ .class_init = ppc_uic_class_init,
+};
+
+static void ppc_uic_register_types(void)
+{
+ type_register_static(&ppc_uic_info);
+}
+
+type_init(ppc_uic_register_types);
diff --git a/hw/intc/realview_gic.c b/hw/intc/realview_gic.c
new file mode 100644
index 000000000..9b12116b2
--- /dev/null
+++ b/hw/intc/realview_gic.c
@@ -0,0 +1,86 @@
+/*
+ * ARM RealView Emulation Baseboard Interrupt Controller
+ *
+ * Copyright (c) 2006-2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/intc/realview_gic.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+
+static void realview_gic_set_irq(void *opaque, int irq, int level)
+{
+ RealViewGICState *s = (RealViewGICState *)opaque;
+
+ qemu_set_irq(qdev_get_gpio_in(DEVICE(&s->gic), irq), level);
+}
+
+static void realview_gic_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ RealViewGICState *s = REALVIEW_GIC(dev);
+ SysBusDevice *busdev;
+ /* The GICs on the RealView boards have a fixed nonconfigurable
+ * number of interrupt lines, so we don't need to expose this as
+ * a qdev property.
+ */
+ int numirq = 96;
+
+ qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", numirq);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) {
+ return;
+ }
+ busdev = SYS_BUS_DEVICE(&s->gic);
+
+ /* Pass through outbound IRQ lines from the GIC */
+ sysbus_pass_irq(sbd, busdev);
+
+ /* Pass through inbound GPIO lines to the GIC */
+ qdev_init_gpio_in(dev, realview_gic_set_irq, numirq - 32);
+
+ memory_region_add_subregion(&s->container, 0,
+ sysbus_mmio_get_region(busdev, 1));
+ memory_region_add_subregion(&s->container, 0x1000,
+ sysbus_mmio_get_region(busdev, 0));
+}
+
+static void realview_gic_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ RealViewGICState *s = REALVIEW_GIC(obj);
+
+ memory_region_init(&s->container, OBJECT(s),
+ "realview-gic-container", 0x2000);
+ sysbus_init_mmio(sbd, &s->container);
+
+ object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC);
+ qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", 1);
+}
+
+static void realview_gic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = realview_gic_realize;
+}
+
+static const TypeInfo realview_gic_info = {
+ .name = TYPE_REALVIEW_GIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(RealViewGICState),
+ .instance_init = realview_gic_init,
+ .class_init = realview_gic_class_init,
+};
+
+static void realview_gic_register_types(void)
+{
+ type_register_static(&realview_gic_info);
+}
+
+type_init(realview_gic_register_types)
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
new file mode 100644
index 000000000..f1a5d3d28
--- /dev/null
+++ b/hw/intc/riscv_aclint.c
@@ -0,0 +1,460 @@
+/*
+ * RISC-V ACLINT (Advanced Core Local Interruptor)
+ * URL: https://github.com/riscv/riscv-aclint
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017 SiFive, Inc.
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * This provides real-time clock, timer and interprocessor interrupts.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/sysbus.h"
+#include "target/riscv/cpu.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/riscv_aclint.h"
+#include "qemu/timer.h"
+#include "hw/irq.h"
+
+typedef struct riscv_aclint_mtimer_callback {
+ RISCVAclintMTimerState *s;
+ int num;
+} riscv_aclint_mtimer_callback;
+
+static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq)
+{
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ timebase_freq, NANOSECONDS_PER_SECOND);
+}
+
+/*
+ * Called when timecmp is written: update the QEMU timer or, if
+ * mtimecmp <= the current timer value, raise the timer interrupt
+ * immediately.
+ */
+static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
+ RISCVCPU *cpu,
+ int hartid,
+ uint64_t value,
+ uint32_t timebase_freq)
+{
+ uint64_t next;
+ uint64_t diff;
+
+ uint64_t rtc_r = cpu_riscv_read_rtc(timebase_freq);
+
+ cpu->env.timecmp = value;
+ if (cpu->env.timecmp <= rtc_r) {
+ /*
+ * If we're setting an MTIMECMP value in the "past",
+ * immediately raise the timer interrupt
+ */
+ qemu_irq_raise(mtimer->timer_irqs[hartid - mtimer->hartid_base]);
+ return;
+ }
+
+ /* otherwise, set up the future timer interrupt */
+ qemu_irq_lower(mtimer->timer_irqs[hartid - mtimer->hartid_base]);
+ diff = cpu->env.timecmp - rtc_r;
+ /* back to ns (note args switched in muldiv64) */
+ uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
+
+ /*
+ * check if ns_diff overflowed and check if the addition would potentially
+ * overflow
+ */
+ if ((NANOSECONDS_PER_SECOND > timebase_freq && ns_diff < diff) ||
+ ns_diff > INT64_MAX) {
+ next = INT64_MAX;
+ } else {
+ /*
+ * as it is very unlikely qemu_clock_get_ns will return a value
+ * greater than INT64_MAX, no additional check is needed for an
+ * unsigned integer overflow.
+ */
+ next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns_diff;
+ /*
+ * if ns_diff is INT64_MAX next may still be outside the range
+ * of a signed integer.
+ */
+ next = MIN(next, INT64_MAX);
+ }
+
+ timer_mod(cpu->env.timer, next);
+}
+
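+/*
+ * For example (hypothetical values): with timebase_freq = 10 MHz and
+ * a timecmp 5,000,000 ticks in the future, diff = 5,000,000 and
+ * ns_diff = muldiv64(5000000, NANOSECONDS_PER_SECOND, 10000000)
+ * = 500,000,000 ns, so the QEMU timer fires half a second from now.
+ */
+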
+/*
+ * Callback used when the timer set using timer_mod expires.
+ * Should raise the timer interrupt line
+ */
+static void riscv_aclint_mtimer_cb(void *opaque)
+{
+ riscv_aclint_mtimer_callback *state = opaque;
+
+ qemu_irq_raise(state->s->timer_irqs[state->num]);
+}
+
+/* CPU read MTIMER register */
+static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ RISCVAclintMTimerState *mtimer = opaque;
+
+ if (addr >= mtimer->timecmp_base &&
+ addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) {
+ size_t hartid = mtimer->hartid_base +
+ ((addr - mtimer->timecmp_base) >> 3);
+ CPUState *cpu = qemu_get_cpu(hartid);
+ CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ if (!env) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "aclint-mtimer: invalid hartid: %zu", hartid);
+ } else if ((addr & 0x7) == 0) {
+ /* timecmp_lo */
+ uint64_t timecmp = env->timecmp;
+ return timecmp & 0xFFFFFFFF;
+ } else if ((addr & 0x7) == 4) {
+ /* timecmp_hi */
+ uint64_t timecmp = env->timecmp;
+ return (timecmp >> 32) & 0xFFFFFFFF;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "aclint-mtimer: invalid read: %08x", (uint32_t)addr);
+ return 0;
+ }
+ } else if (addr == mtimer->time_base) {
+ /* time_lo */
+ return cpu_riscv_read_rtc(mtimer->timebase_freq) & 0xFFFFFFFF;
+ } else if (addr == mtimer->time_base + 4) {
+ /* time_hi */
+ return (cpu_riscv_read_rtc(mtimer->timebase_freq) >> 32) & 0xFFFFFFFF;
+ }
+
+ qemu_log_mask(LOG_UNIMP,
+ "aclint-mtimer: invalid read: %08x", (uint32_t)addr);
+ return 0;
+}
+
+/* CPU write MTIMER register */
+static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ RISCVAclintMTimerState *mtimer = opaque;
+
+ if (addr >= mtimer->timecmp_base &&
+ addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) {
+ size_t hartid = mtimer->hartid_base +
+ ((addr - mtimer->timecmp_base) >> 3);
+ CPUState *cpu = qemu_get_cpu(hartid);
+ CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ if (!env) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "aclint-mtimer: invalid hartid: %zu", hartid);
+ } else if ((addr & 0x7) == 0) {
+ /* timecmp_lo */
+ uint64_t timecmp_hi = env->timecmp >> 32;
+ riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
+ timecmp_hi << 32 | (value & 0xFFFFFFFF),
+ mtimer->timebase_freq);
+ return;
+ } else if ((addr & 0x7) == 4) {
+ /* timecmp_hi */
+ uint64_t timecmp_lo = env->timecmp;
+ riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
+ value << 32 | (timecmp_lo & 0xFFFFFFFF),
+ mtimer->timebase_freq);
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "aclint-mtimer: invalid timecmp write: %08x",
+ (uint32_t)addr);
+ }
+ return;
+ } else if (addr == mtimer->time_base) {
+ /* time_lo */
+ qemu_log_mask(LOG_UNIMP,
+ "aclint-mtimer: time_lo write not implemented");
+ return;
+ } else if (addr == mtimer->time_base + 4) {
+ /* time_hi */
+ qemu_log_mask(LOG_UNIMP,
+ "aclint-mtimer: time_hi write not implemented");
+ return;
+ }
+
+ qemu_log_mask(LOG_UNIMP,
+ "aclint-mtimer: invalid write: %08x", (uint32_t)addr);
+}
+
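+/*
+ * Layout recap: the mtimecmp of hart (hartid_base + N) occupies the
+ * 8 bytes at timecmp_base + 8 * N, accessed as two 32-bit halves
+ * (offset + 0 low word, offset + 4 high word), while mtime at
+ * time_base / time_base + 4 is only readable in this model (writes
+ * are logged as unimplemented).
+ */
+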
+static const MemoryRegionOps riscv_aclint_mtimer_ops = {
+ .read = riscv_aclint_mtimer_read,
+ .write = riscv_aclint_mtimer_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8
+ }
+};
+
+static Property riscv_aclint_mtimer_properties[] = {
+ DEFINE_PROP_UINT32("hartid-base", RISCVAclintMTimerState,
+ hartid_base, 0),
+ DEFINE_PROP_UINT32("num-harts", RISCVAclintMTimerState, num_harts, 1),
+ DEFINE_PROP_UINT32("timecmp-base", RISCVAclintMTimerState,
+ timecmp_base, RISCV_ACLINT_DEFAULT_MTIMECMP),
+ DEFINE_PROP_UINT32("time-base", RISCVAclintMTimerState,
+ time_base, RISCV_ACLINT_DEFAULT_MTIME),
+ DEFINE_PROP_UINT32("aperture-size", RISCVAclintMTimerState,
+ aperture_size, RISCV_ACLINT_DEFAULT_MTIMER_SIZE),
+ DEFINE_PROP_UINT32("timebase-freq", RISCVAclintMTimerState,
+ timebase_freq, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp)
+{
+ RISCVAclintMTimerState *s = RISCV_ACLINT_MTIMER(dev);
+ int i;
+
+ memory_region_init_io(&s->mmio, OBJECT(dev), &riscv_aclint_mtimer_ops,
+ s, TYPE_RISCV_ACLINT_MTIMER, s->aperture_size);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio);
+
+ s->timer_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts);
+ qdev_init_gpio_out(dev, s->timer_irqs, s->num_harts);
+
+ /* Claim timer interrupt bits */
+ for (i = 0; i < s->num_harts; i++) {
+ RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i));
+ if (riscv_cpu_claim_interrupts(cpu, MIP_MTIP) < 0) {
+ error_report("MTIP already claimed");
+ exit(1);
+ }
+ }
+}
+
+static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->realize = riscv_aclint_mtimer_realize;
+ device_class_set_props(dc, riscv_aclint_mtimer_properties);
+}
+
+static const TypeInfo riscv_aclint_mtimer_info = {
+ .name = TYPE_RISCV_ACLINT_MTIMER,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(RISCVAclintMTimerState),
+ .class_init = riscv_aclint_mtimer_class_init,
+};
+
+/*
+ * Create ACLINT MTIMER device.
+ */
+DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size,
+ uint32_t hartid_base, uint32_t num_harts,
+ uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq,
+ bool provide_rdtime)
+{
+ int i;
+ DeviceState *dev = qdev_new(TYPE_RISCV_ACLINT_MTIMER);
+
+ assert(num_harts <= RISCV_ACLINT_MAX_HARTS);
+ assert(!(addr & 0x7));
+ assert(!(timecmp_base & 0x7));
+ assert(!(time_base & 0x7));
+
+ qdev_prop_set_uint32(dev, "hartid-base", hartid_base);
+ qdev_prop_set_uint32(dev, "num-harts", num_harts);
+ qdev_prop_set_uint32(dev, "timecmp-base", timecmp_base);
+ qdev_prop_set_uint32(dev, "time-base", time_base);
+ qdev_prop_set_uint32(dev, "aperture-size", size);
+ qdev_prop_set_uint32(dev, "timebase-freq", timebase_freq);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
+
+ for (i = 0; i < num_harts; i++) {
+ CPUState *cpu = qemu_get_cpu(hartid_base + i);
+ RISCVCPU *rvcpu = RISCV_CPU(cpu);
+ CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ riscv_aclint_mtimer_callback *cb =
+ g_malloc0(sizeof(riscv_aclint_mtimer_callback));
+
+ if (!env) {
+ g_free(cb);
+ continue;
+ }
+ if (provide_rdtime) {
+ riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, timebase_freq);
+ }
+
+ cb->s = RISCV_ACLINT_MTIMER(dev);
+ cb->num = i;
+ env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ &riscv_aclint_mtimer_cb, cb);
+ env->timecmp = 0;
+
+ qdev_connect_gpio_out(dev, i,
+ qdev_get_gpio_in(DEVICE(rvcpu), IRQ_M_TIMER));
+ }
+
+ return dev;
+}
+
+/* CPU read [M|S]SWI register */
+static uint64_t riscv_aclint_swi_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ RISCVAclintSwiState *swi = opaque;
+
+ if (addr < (swi->num_harts << 2)) {
+ size_t hartid = swi->hartid_base + (addr >> 2);
+ CPUState *cpu = qemu_get_cpu(hartid);
+ CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ if (!env) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "aclint-swi: invalid hartid: %zu", hartid);
+ } else if ((addr & 0x3) == 0) {
+ return (swi->sswi) ? 0 : ((env->mip & MIP_MSIP) > 0);
+ }
+ }
+
+ qemu_log_mask(LOG_UNIMP,
+ "aclint-swi: invalid read: %08x", (uint32_t)addr);
+ return 0;
+}
+
+/* CPU write [M|S]SWI register */
+static void riscv_aclint_swi_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ RISCVAclintSwiState *swi = opaque;
+
+ if (addr < (swi->num_harts << 2)) {
+ size_t hartid = swi->hartid_base + (addr >> 2);
+ CPUState *cpu = qemu_get_cpu(hartid);
+ CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ if (!env) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "aclint-swi: invalid hartid: %zu", hartid);
+ } else if ((addr & 0x3) == 0) {
+ if (value & 0x1) {
+ qemu_irq_raise(swi->soft_irqs[hartid - swi->hartid_base]);
+ } else {
+ if (!swi->sswi) {
+ qemu_irq_lower(swi->soft_irqs[hartid - swi->hartid_base]);
+ }
+ }
+ return;
+ }
+ }
+
+ qemu_log_mask(LOG_UNIMP,
+ "aclint-swi: invalid write: %08x", (uint32_t)addr);
+}
+
+static const MemoryRegionOps riscv_aclint_swi_ops = {
+ .read = riscv_aclint_swi_read,
+ .write = riscv_aclint_swi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4
+ }
+};
+
+static Property riscv_aclint_swi_properties[] = {
+ DEFINE_PROP_UINT32("hartid-base", RISCVAclintSwiState, hartid_base, 0),
+ DEFINE_PROP_UINT32("num-harts", RISCVAclintSwiState, num_harts, 1),
+ DEFINE_PROP_UINT32("sswi", RISCVAclintSwiState, sswi, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void riscv_aclint_swi_realize(DeviceState *dev, Error **errp)
+{
+ RISCVAclintSwiState *swi = RISCV_ACLINT_SWI(dev);
+ int i;
+
+ memory_region_init_io(&swi->mmio, OBJECT(dev), &riscv_aclint_swi_ops, swi,
+ TYPE_RISCV_ACLINT_SWI, RISCV_ACLINT_SWI_SIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &swi->mmio);
+
+ swi->soft_irqs = g_malloc(sizeof(qemu_irq) * swi->num_harts);
+ qdev_init_gpio_out(dev, swi->soft_irqs, swi->num_harts);
+
+ /* Claim software interrupt bits */
+ for (i = 0; i < swi->num_harts; i++) {
+ RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(swi->hartid_base + i));
+ /* We don't claim mip.SSIP because it is writeable by software */
+ if (riscv_cpu_claim_interrupts(cpu, swi->sswi ? 0 : MIP_MSIP) < 0) {
+ error_report("MSIP already claimed");
+ exit(1);
+ }
+ }
+}
+
+static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->realize = riscv_aclint_swi_realize;
+ device_class_set_props(dc, riscv_aclint_swi_properties);
+}
+
+static const TypeInfo riscv_aclint_swi_info = {
+ .name = TYPE_RISCV_ACLINT_SWI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(RISCVAclintSwiState),
+ .class_init = riscv_aclint_swi_class_init,
+};
+
+/*
+ * Create ACLINT [M|S]SWI device.
+ */
+DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base,
+ uint32_t num_harts, bool sswi)
+{
+ int i;
+ DeviceState *dev = qdev_new(TYPE_RISCV_ACLINT_SWI);
+
+ assert(num_harts <= RISCV_ACLINT_MAX_HARTS);
+ assert(!(addr & 0x3));
+
+ qdev_prop_set_uint32(dev, "hartid-base", hartid_base);
+ qdev_prop_set_uint32(dev, "num-harts", num_harts);
+ qdev_prop_set_uint32(dev, "sswi", sswi ? true : false);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
+
+ for (i = 0; i < num_harts; i++) {
+ CPUState *cpu = qemu_get_cpu(hartid_base + i);
+ RISCVCPU *rvcpu = RISCV_CPU(cpu);
+
+ qdev_connect_gpio_out(dev, i,
+ qdev_get_gpio_in(DEVICE(rvcpu),
+ (sswi) ? IRQ_S_SOFT : IRQ_M_SOFT));
+ }
+
+ return dev;
+}
+
+static void riscv_aclint_register_types(void)
+{
+ type_register_static(&riscv_aclint_mtimer_info);
+ type_register_static(&riscv_aclint_swi_info);
+}
+
+type_init(riscv_aclint_register_types)
diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c
new file mode 100644
index 000000000..e5c01807b
--- /dev/null
+++ b/hw/intc/rx_icu.c
@@ -0,0 +1,395 @@
+/*
+ * RX Interrupt Control Unit
+ *
+ * Warning: Only ICUa is supported.
+ *
+ * Datasheet: RX62N Group, RX621 Group User's Manual: Hardware
+ * (Rev.1.40 R01UH0033EJ0140)
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/irq.h"
+#include "hw/registerfields.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/rx_icu.h"
+#include "migration/vmstate.h"
+
+REG8(IR, 0)
+ FIELD(IR, IR, 0, 1)
+REG8(DTCER, 0x100)
+ FIELD(DTCER, DTCE, 0, 1)
+REG8(IER, 0x200)
+REG8(SWINTR, 0x2e0)
+ FIELD(SWINTR, SWINT, 0, 1)
+REG16(FIR, 0x2f0)
+ FIELD(FIR, FVCT, 0, 8)
+ FIELD(FIR, FIEN, 15, 1)
+REG8(IPR, 0x300)
+ FIELD(IPR, IPR, 0, 4)
+REG8(DMRSR, 0x400)
+REG8(IRQCR, 0x500)
+ FIELD(IRQCR, IRQMD, 2, 2)
+REG8(NMISR, 0x580)
+ FIELD(NMISR, NMIST, 0, 1)
+ FIELD(NMISR, LVDST, 1, 1)
+ FIELD(NMISR, OSTST, 2, 1)
+REG8(NMIER, 0x581)
+ FIELD(NMIER, NMIEN, 0, 1)
+ FIELD(NMIER, LVDEN, 1, 1)
+ FIELD(NMIER, OSTEN, 2, 1)
+REG8(NMICLR, 0x582)
+ FIELD(NMICLR, NMICLR, 0, 1)
+ FIELD(NMICLR, OSTCLR, 2, 1)
+REG8(NMICR, 0x583)
+ FIELD(NMICR, NMIMD, 3, 1)
+
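+/*
+ * Forward an interrupt request to the CPU. When the fast interrupt is
+ * enabled (FIR.FIEN) and the vector matches FIR.FVCT, the dedicated fast
+ * interrupt line is used; otherwise the normal IRQ line is raised. The
+ * request level encodes priority and vector (see rxicu_level() below).
+ */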
+static void set_irq(RXICUState *icu, int n_IRQ, int req)
+{
+ if ((icu->fir & R_FIR_FIEN_MASK) &&
+ (icu->fir & R_FIR_FVCT_MASK) == n_IRQ) {
+ qemu_set_irq(icu->_fir, req);
+ } else {
+ qemu_set_irq(icu->_irq, req);
+ }
+}
+
+static uint16_t rxicu_level(RXICUState *icu, unsigned n)
+{
+ return (icu->ipr[icu->map[n]] << 8) | n;
+}
+
+static void rxicu_request(RXICUState *icu, int n_IRQ)
+{
+ int enable;
+
+ enable = icu->ier[n_IRQ / 8] & (1 << (n_IRQ & 7));
+ if (n_IRQ > 0 && enable != 0 && qatomic_read(&icu->req_irq) < 0) {
+ qatomic_set(&icu->req_irq, n_IRQ);
+ set_irq(icu, n_IRQ, rxicu_level(icu, n_IRQ));
+ }
+}
+
+static void rxicu_set_irq(void *opaque, int n_IRQ, int level)
+{
+ RXICUState *icu = opaque;
+ struct IRQSource *src;
+ int issue;
+
+ if (n_IRQ >= NR_IRQS) {
+ error_report("%s: IRQ %d out of range", __func__, n_IRQ);
+ return;
+ }
+
+ src = &icu->src[n_IRQ];
+
+ level = (level != 0);
+ switch (src->sense) {
+ case TRG_LEVEL:
+ /* level-sensitive irq */
+ issue = level;
+ src->level = level;
+ break;
+ case TRG_NEDGE:
+ issue = (level == 0 && src->level == 1);
+ src->level = level;
+ break;
+ case TRG_PEDGE:
+ issue = (level == 1 && src->level == 0);
+ src->level = level;
+ break;
+ case TRG_BEDGE:
+ issue = ((level ^ src->level) & 1);
+ src->level = level;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (issue == 0 && src->sense == TRG_LEVEL) {
+ icu->ir[n_IRQ] = 0;
+ if (qatomic_read(&icu->req_irq) == n_IRQ) {
+ /* clear request */
+ set_irq(icu, n_IRQ, 0);
+ qatomic_set(&icu->req_irq, -1);
+ }
+ return;
+ }
+ if (issue) {
+ icu->ir[n_IRQ] = 1;
+ rxicu_request(icu, n_IRQ);
+ }
+}
+
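+/*
+ * Interrupt acknowledge from the CPU: drop the request that was just
+ * taken and, if other sources are still pending, re-issue the one with
+ * the highest IPR priority.
+ */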
+static void rxicu_ack_irq(void *opaque, int no, int level)
+{
+ RXICUState *icu = opaque;
+ int i;
+ int n_IRQ;
+ int max_pri;
+
+ n_IRQ = qatomic_read(&icu->req_irq);
+ if (n_IRQ < 0) {
+ return;
+ }
+ qatomic_set(&icu->req_irq, -1);
+ if (icu->src[n_IRQ].sense != TRG_LEVEL) {
+ icu->ir[n_IRQ] = 0;
+ }
+
+ max_pri = 0;
+ n_IRQ = -1;
+ for (i = 0; i < NR_IRQS; i++) {
+ if (icu->ir[i]) {
+ if (max_pri < icu->ipr[icu->map[i]]) {
+ n_IRQ = i;
+ max_pri = icu->ipr[icu->map[i]];
+ }
+ }
+ }
+
+ if (n_IRQ >= 0) {
+ rxicu_request(icu, n_IRQ);
+ }
+}
+
+static uint64_t icu_read(void *opaque, hwaddr addr, unsigned size)
+{
+ RXICUState *icu = opaque;
+ int reg = addr & 0xff;
+
+ if ((addr != A_FIR && size != 1) ||
+ (addr == A_FIR && size != 2)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "rx_icu: Invalid read size 0x%"
+ HWADDR_PRIX "\n",
+ addr);
+ return UINT64_MAX;
+ }
+ switch (addr) {
+ case A_IR ... A_IR + 0xff:
+ return icu->ir[reg] & R_IR_IR_MASK;
+ case A_DTCER ... A_DTCER + 0xff:
+ return icu->dtcer[reg] & R_DTCER_DTCE_MASK;
+ case A_IER ... A_IER + 0x1f:
+ return icu->ier[reg];
+ case A_SWINTR:
+ return 0;
+ case A_FIR:
+ return icu->fir & (R_FIR_FIEN_MASK | R_FIR_FVCT_MASK);
+ case A_IPR ... A_IPR + 0x8f:
+ return icu->ipr[reg] & R_IPR_IPR_MASK;
+ case A_DMRSR:
+ case A_DMRSR + 4:
+ case A_DMRSR + 8:
+ case A_DMRSR + 12:
+ return icu->dmasr[reg >> 2];
+ case A_IRQCR ... A_IRQCR + 0x1f:
+ return icu->src[64 + reg].sense << R_IRQCR_IRQMD_SHIFT;
+ case A_NMISR:
+ case A_NMICLR:
+ return 0;
+ case A_NMIER:
+ return icu->nmier;
+ case A_NMICR:
+ return icu->nmicr;
+ default:
+ qemu_log_mask(LOG_UNIMP, "rx_icu: Register 0x%" HWADDR_PRIX " "
+ "not implemented.\n",
+ addr);
+ break;
+ }
+ return UINT64_MAX;
+}
+
+static void icu_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+ RXICUState *icu = opaque;
+ int reg = addr & 0xff;
+
+ if ((addr != A_FIR && size != 1) ||
+ (addr == A_FIR && size != 2)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "rx_icu: Invalid write size at "
+ "0x%" HWADDR_PRIX "\n",
+ addr);
+ return;
+ }
+ switch (addr) {
+ case A_IR ... A_IR + 0xff:
+ if (icu->src[reg].sense != TRG_LEVEL && val == 0) {
+ icu->ir[reg] = 0;
+ }
+ break;
+ case A_DTCER ... A_DTCER + 0xff:
+ icu->dtcer[reg] = val & R_DTCER_DTCE_MASK;
+ qemu_log_mask(LOG_UNIMP, "rx_icu: DTC not implemented\n");
+ break;
+ case A_IER ... A_IER + 0x1f:
+ icu->ier[reg] = val;
+ break;
+ case A_SWINTR:
+ if (val & R_SWINTR_SWINT_MASK) {
+ qemu_irq_pulse(icu->_swi);
+ }
+ break;
+ case A_FIR:
+ icu->fir = val & (R_FIR_FIEN_MASK | R_FIR_FVCT_MASK);
+ break;
+ case A_IPR ... A_IPR + 0x8f:
+ icu->ipr[reg] = val & R_IPR_IPR_MASK;
+ break;
+ case A_DMRSR:
+ case A_DMRSR + 4:
+ case A_DMRSR + 8:
+ case A_DMRSR + 12:
+ icu->dmasr[reg >> 2] = val;
+ qemu_log_mask(LOG_UNIMP, "rx_icu: DMAC not implemented\n");
+ break;
+ case A_IRQCR ... A_IRQCR + 0x1f:
+ icu->src[64 + reg].sense = val >> R_IRQCR_IRQMD_SHIFT;
+ break;
+ case A_NMICLR:
+ break;
+ case A_NMIER:
+ icu->nmier |= val & (R_NMIER_NMIEN_MASK |
+ R_NMIER_LVDEN_MASK |
+ R_NMIER_OSTEN_MASK);
+ break;
+ case A_NMICR:
+ if ((icu->nmier & R_NMIER_NMIEN_MASK) == 0) {
+ icu->nmicr = val & R_NMICR_NMIMD_MASK;
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "rx_icu: Register 0x%" HWADDR_PRIX " "
+ "not implemented\n",
+ addr);
+ break;
+ }
+}
+
+static const MemoryRegionOps icu_ops = {
+ .write = icu_write,
+ .read = icu_read,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 2,
+ },
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 2,
+ },
+};
+
+static void rxicu_realize(DeviceState *dev, Error **errp)
+{
+ RXICUState *icu = RX_ICU(dev);
+ int i;
+
+ if (icu->init_sense == NULL) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rx_icu: trigger-level property must be set.");
+ return;
+ }
+
+ for (i = 0; i < NR_IRQS; i++) {
+ icu->src[i].sense = TRG_PEDGE;
+ }
+ for (i = 0; i < icu->nr_sense; i++) {
+ uint8_t irqno = icu->init_sense[i];
+ icu->src[irqno].sense = TRG_LEVEL;
+ }
+ icu->req_irq = -1;
+}
+
+static void rxicu_init(Object *obj)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(obj);
+ RXICUState *icu = RX_ICU(obj);
+
+ memory_region_init_io(&icu->memory, OBJECT(icu), &icu_ops,
+ icu, "rx-icu", 0x600);
+ sysbus_init_mmio(d, &icu->memory);
+
+ qdev_init_gpio_in(DEVICE(d), rxicu_set_irq, NR_IRQS);
+ qdev_init_gpio_in_named(DEVICE(d), rxicu_ack_irq, "ack", 1);
+ sysbus_init_irq(d, &icu->_irq);
+ sysbus_init_irq(d, &icu->_fir);
+ sysbus_init_irq(d, &icu->_swi);
+}
+
+static void rxicu_fini(Object *obj)
+{
+ RXICUState *icu = RX_ICU(obj);
+ g_free(icu->map);
+ g_free(icu->init_sense);
+}
+
+static const VMStateDescription vmstate_rxicu = {
+ .name = "rx-icu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(ir, RXICUState, NR_IRQS),
+ VMSTATE_UINT8_ARRAY(dtcer, RXICUState, NR_IRQS),
+ VMSTATE_UINT8_ARRAY(ier, RXICUState, NR_IRQS / 8),
+ VMSTATE_UINT8_ARRAY(ipr, RXICUState, 142),
+ VMSTATE_UINT8_ARRAY(dmasr, RXICUState, 4),
+ VMSTATE_UINT16(fir, RXICUState),
+ VMSTATE_UINT8(nmisr, RXICUState),
+ VMSTATE_UINT8(nmier, RXICUState),
+ VMSTATE_UINT8(nmiclr, RXICUState),
+ VMSTATE_UINT8(nmicr, RXICUState),
+ VMSTATE_INT16(req_irq, RXICUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property rxicu_properties[] = {
+ DEFINE_PROP_ARRAY("ipr-map", RXICUState, nr_irqs, map,
+ qdev_prop_uint8, uint8_t),
+ DEFINE_PROP_ARRAY("trigger-level", RXICUState, nr_sense, init_sense,
+ qdev_prop_uint8, uint8_t),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void rxicu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = rxicu_realize;
+ dc->vmsd = &vmstate_rxicu;
+ device_class_set_props(dc, rxicu_properties);
+}
+
+static const TypeInfo rxicu_info = {
+ .name = TYPE_RX_ICU,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(RXICUState),
+ .instance_init = rxicu_init,
+ .instance_finalize = rxicu_fini,
+ .class_init = rxicu_class_init,
+};
+
+static void rxicu_register_types(void)
+{
+ type_register_static(&rxicu_info);
+}
+
+type_init(rxicu_register_types)
diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c
new file mode 100644
index 000000000..74e02858d
--- /dev/null
+++ b/hw/intc/s390_flic.c
@@ -0,0 +1,503 @@
+/*
+ * QEMU S390x floating interrupt controller (flic)
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Jens Freimann <jfrei@linux.vnet.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "hw/sysbus.h"
+#include "hw/s390x/ioinst.h"
+#include "hw/s390x/s390_flic.h"
+#include "hw/qdev-properties.h"
+#include "hw/s390x/css.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+
+S390FLICStateClass *s390_get_flic_class(S390FLICState *fs)
+{
+ static S390FLICStateClass *class;
+
+ if (!class) {
+ /* we only have one flic device, so this is fine to cache */
+ class = S390_FLIC_COMMON_GET_CLASS(fs);
+ }
+ return class;
+}
+
+QEMUS390FLICState *s390_get_qemu_flic(S390FLICState *fs)
+{
+ static QEMUS390FLICState *flic;
+
+ if (!flic) {
+ /* we only have one flic device, so this is fine to cache */
+ flic = QEMU_S390_FLIC(fs);
+ }
+ return flic;
+}
+
+S390FLICState *s390_get_flic(void)
+{
+ static S390FLICState *fs;
+
+ if (!fs) {
+ fs = S390_FLIC_COMMON(object_resolve_path_type("",
+ TYPE_S390_FLIC_COMMON,
+ NULL));
+ }
+ return fs;
+}
+
+void s390_flic_init(void)
+{
+ DeviceState *dev;
+
+ if (kvm_enabled()) {
+ dev = qdev_new(TYPE_KVM_S390_FLIC);
+ object_property_add_child(qdev_get_machine(), TYPE_KVM_S390_FLIC,
+ OBJECT(dev));
+ } else {
+ dev = qdev_new(TYPE_QEMU_S390_FLIC);
+ object_property_add_child(qdev_get_machine(), TYPE_QEMU_S390_FLIC,
+ OBJECT(dev));
+ }
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+}
+
+static int qemu_s390_register_io_adapter(S390FLICState *fs, uint32_t id,
+ uint8_t isc, bool swap,
+ bool is_maskable, uint8_t flags)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static int qemu_s390_io_adapter_map(S390FLICState *fs, uint32_t id,
+ uint64_t map_addr, bool do_map)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static int qemu_s390_add_adapter_routes(S390FLICState *fs,
+ AdapterRoutes *routes)
+{
+ return -ENOSYS;
+}
+
+static void qemu_s390_release_adapter_routes(S390FLICState *fs,
+ AdapterRoutes *routes)
+{
+}
+
+static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
+ uint16_t subchannel_nr)
+{
+ QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
+ QEMUS390FlicIO *cur, *next;
+ uint8_t isc;
+
+ g_assert(qemu_mutex_iothread_locked());
+ if (!(flic->pending & FLIC_PENDING_IO)) {
+ return 0;
+ }
+
+ /* check all iscs */
+ for (isc = 0; isc < 8; isc++) {
+ if (QLIST_EMPTY(&flic->io[isc])) {
+ continue;
+ }
+
+ /* search and delete any matching one */
+ QLIST_FOREACH_SAFE(cur, &flic->io[isc], next, next) {
+ if (cur->id == subchannel_id && cur->nr == subchannel_nr) {
+ QLIST_REMOVE(cur, next);
+ g_free(cur);
+ }
+ }
+
+ /* update our indicator bit */
+ if (QLIST_EMPTY(&flic->io[isc])) {
+ flic->pending &= ~ISC_TO_PENDING_IO(isc);
+ }
+ }
+ return 0;
+}
+
+static int qemu_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc,
+ uint16_t mode)
+{
+ QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
+
+ switch (mode) {
+ case SIC_IRQ_MODE_ALL:
+ flic->simm &= ~AIS_MODE_MASK(isc);
+ flic->nimm &= ~AIS_MODE_MASK(isc);
+ break;
+ case SIC_IRQ_MODE_SINGLE:
+ flic->simm |= AIS_MODE_MASK(isc);
+ flic->nimm &= ~AIS_MODE_MASK(isc);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qemu_s390_inject_airq(S390FLICState *fs, uint8_t type,
+ uint8_t isc, uint8_t flags)
+{
+ QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
+ S390FLICStateClass *fsc = s390_get_flic_class(fs);
+ bool flag = flags & S390_ADAPTER_SUPPRESSIBLE;
+ uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
+
+ if (flag && (flic->nimm & AIS_MODE_MASK(isc))) {
+ trace_qemu_s390_airq_suppressed(type, isc);
+ return 0;
+ }
+
+ fsc->inject_io(fs, 0, 0, 0, io_int_word);
+
+ if (flag && (flic->simm & AIS_MODE_MASK(isc))) {
+ flic->nimm |= AIS_MODE_MASK(isc);
+ trace_qemu_s390_suppress_airq(isc, "Single-Interruption Mode",
+ "NO-Interruptions Mode");
+ }
+
+ return 0;
+}
+
+static void qemu_s390_flic_notify(uint32_t type)
+{
+ CPUState *cs;
+
+ /*
+ * We have to make all CPUs see CPU_INTERRUPT_HARD, so they might
+ * consider it. We will kick all running CPUs and only relevant
+ * sleeping ones.
+ */
+ CPU_FOREACH(cs) {
+ S390CPU *cpu = S390_CPU(cs);
+
+ cs->interrupt_request |= CPU_INTERRUPT_HARD;
+
+ /* ignore CPUs that are not sleeping */
+ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING &&
+ s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD) {
+ continue;
+ }
+
+ /* we always kick running CPUs for now, this is tricky */
+ if (cs->halted) {
+ /* don't check for subclasses, CPUs double check when waking up */
+ if (type & FLIC_PENDING_SERVICE) {
+ if (!(cpu->env.psw.mask & PSW_MASK_EXT)) {
+ continue;
+ }
+ } else if (type & FLIC_PENDING_IO) {
+ if (!(cpu->env.psw.mask & PSW_MASK_IO)) {
+ continue;
+ }
+ } else if (type & FLIC_PENDING_MCHK_CR) {
+ if (!(cpu->env.psw.mask & PSW_MASK_MCHECK)) {
+ continue;
+ }
+ }
+ }
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
+{
+ uint32_t tmp;
+
+ g_assert(qemu_mutex_iothread_locked());
+ g_assert(flic->pending & FLIC_PENDING_SERVICE);
+ tmp = flic->service_param;
+ flic->service_param = 0;
+ flic->pending &= ~FLIC_PENDING_SERVICE;
+
+ return tmp;
+}
+
+/* caller has to free the returned object */
+QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
+{
+ QEMUS390FlicIO *io;
+ uint8_t isc;
+
+ g_assert(qemu_mutex_iothread_locked());
+ if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
+ return NULL;
+ }
+
+ for (isc = 0; isc < 8; isc++) {
+ if (QLIST_EMPTY(&flic->io[isc]) || !(cr6 & ISC_TO_ISC_BITS(isc))) {
+ continue;
+ }
+ io = QLIST_FIRST(&flic->io[isc]);
+ QLIST_REMOVE(io, next);
+
+ /* update our indicator bit */
+ if (QLIST_EMPTY(&flic->io[isc])) {
+ flic->pending &= ~ISC_TO_PENDING_IO(isc);
+ }
+ return io;
+ }
+
+ return NULL;
+}
+
+void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
+{
+ g_assert(qemu_mutex_iothread_locked());
+ g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
+ flic->pending &= ~FLIC_PENDING_MCHK_CR;
+}
+
+static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
+{
+ QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
+
+ g_assert(qemu_mutex_iothread_locked());
+ /* multiplexing is good enough for sclp - kvm does it internally as well */
+ flic->service_param |= parm;
+ flic->pending |= FLIC_PENDING_SERVICE;
+
+ qemu_s390_flic_notify(FLIC_PENDING_SERVICE);
+}
+
+static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
+ uint16_t subchannel_nr, uint32_t io_int_parm,
+ uint32_t io_int_word)
+{
+ const uint8_t isc = IO_INT_WORD_ISC(io_int_word);
+ QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
+ QEMUS390FlicIO *io;
+
+ g_assert(qemu_mutex_iothread_locked());
+ io = g_new0(QEMUS390FlicIO, 1);
+ io->id = subchannel_id;
+ io->nr = subchannel_nr;
+ io->parm = io_int_parm;
+ io->word = io_int_word;
+
+ QLIST_INSERT_HEAD(&flic->io[isc], io, next);
+ flic->pending |= ISC_TO_PENDING_IO(isc);
+
+ qemu_s390_flic_notify(ISC_TO_PENDING_IO(isc));
+}
+
+static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
+{
+ QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
+
+ g_assert(qemu_mutex_iothread_locked());
+ flic->pending |= FLIC_PENDING_MCHK_CR;
+
+ qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
+}
+
+bool qemu_s390_flic_has_service(QEMUS390FLICState *flic)
+{
+ /* called without lock via cc->has_work, will be validated under lock */
+ return !!(flic->pending & FLIC_PENDING_SERVICE);
+}
+
+bool qemu_s390_flic_has_io(QEMUS390FLICState *flic, uint64_t cr6)
+{
+ /* called without lock via cc->has_work, will be validated under lock */
+ return !!(flic->pending & CR6_TO_PENDING_IO(cr6));
+}
+
+bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
+{
+ /* called without lock via cc->has_work, will be validated under lock */
+ return !!(flic->pending & FLIC_PENDING_MCHK_CR);
+}
+
+bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
+{
+ g_assert(qemu_mutex_iothread_locked());
+ return !!flic->pending;
+}
+
+static void qemu_s390_flic_reset(DeviceState *dev)
+{
+ QEMUS390FLICState *flic = QEMU_S390_FLIC(dev);
+ QEMUS390FlicIO *cur, *next;
+ int isc;
+
+ g_assert(qemu_mutex_iothread_locked());
+ flic->simm = 0;
+ flic->nimm = 0;
+ flic->pending = 0;
+
+ /* remove all pending io interrupts */
+ for (isc = 0; isc < 8; isc++) {
+ QLIST_FOREACH_SAFE(cur, &flic->io[isc], next, next) {
+ QLIST_REMOVE(cur, next);
+ g_free(cur);
+ }
+ }
+}
+
+bool ais_needed(void *opaque)
+{
+ S390FLICState *s = opaque;
+
+ return s->ais_supported;
+}
+
+static const VMStateDescription qemu_s390_flic_vmstate = {
+ .name = "qemu-s390-flic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ais_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(simm, QEMUS390FLICState),
+ VMSTATE_UINT8(nimm, QEMUS390FLICState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void qemu_s390_flic_instance_init(Object *obj)
+{
+ QEMUS390FLICState *flic = QEMU_S390_FLIC(obj);
+ int isc;
+
+ for (isc = 0; isc < 8; isc++) {
+ QLIST_INIT(&flic->io[isc]);
+ }
+}
+
+static void qemu_s390_flic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc);
+
+ dc->reset = qemu_s390_flic_reset;
+ dc->vmsd = &qemu_s390_flic_vmstate;
+ fsc->register_io_adapter = qemu_s390_register_io_adapter;
+ fsc->io_adapter_map = qemu_s390_io_adapter_map;
+ fsc->add_adapter_routes = qemu_s390_add_adapter_routes;
+ fsc->release_adapter_routes = qemu_s390_release_adapter_routes;
+ fsc->clear_io_irq = qemu_s390_clear_io_flic;
+ fsc->modify_ais_mode = qemu_s390_modify_ais_mode;
+ fsc->inject_airq = qemu_s390_inject_airq;
+ fsc->inject_service = qemu_s390_inject_service;
+ fsc->inject_io = qemu_s390_inject_io;
+ fsc->inject_crw_mchk = qemu_s390_inject_crw_mchk;
+}
+
+static Property s390_flic_common_properties[] = {
+ DEFINE_PROP_UINT32("adapter_routes_max_batch", S390FLICState,
+ adapter_routes_max_batch, ADAPTER_ROUTES_MAX_GSI),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void s390_flic_common_realize(DeviceState *dev, Error **errp)
+{
+ S390FLICState *fs = S390_FLIC_COMMON(dev);
+ uint32_t max_batch = fs->adapter_routes_max_batch;
+
+ if (max_batch > ADAPTER_ROUTES_MAX_GSI) {
+ error_setg(errp, "flic property adapter_routes_max_batch too big"
+ " (%d > %d)", max_batch, ADAPTER_ROUTES_MAX_GSI);
+ return;
+ }
+
+ fs->ais_supported = s390_has_feat(S390_FEAT_ADAPTER_INT_SUPPRESSION);
+}
+
+static void s390_flic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ device_class_set_props(dc, s390_flic_common_properties);
+ dc->realize = s390_flic_common_realize;
+}
+
+static const TypeInfo qemu_s390_flic_info = {
+ .name = TYPE_QEMU_S390_FLIC,
+ .parent = TYPE_S390_FLIC_COMMON,
+ .instance_size = sizeof(QEMUS390FLICState),
+ .instance_init = qemu_s390_flic_instance_init,
+ .class_init = qemu_s390_flic_class_init,
+};
+
+
+static const TypeInfo s390_flic_common_info = {
+ .name = TYPE_S390_FLIC_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(S390FLICState),
+ .class_init = s390_flic_class_init,
+ .class_size = sizeof(S390FLICStateClass),
+};
+
+static void qemu_s390_flic_register_types(void)
+{
+ type_register_static(&s390_flic_common_info);
+ type_register_static(&qemu_s390_flic_info);
+}
+
+type_init(qemu_s390_flic_register_types)
+
+static bool adapter_info_so_needed(void *opaque)
+{
+ return css_migration_enabled();
+}
+
+const VMStateDescription vmstate_adapter_info_so = {
+ .name = "s390_adapter_info/summary_offset",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = adapter_info_so_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(summary_offset, AdapterInfo),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_adapter_info = {
+ .name = "s390_adapter_info",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ind_offset, AdapterInfo),
+ /*
+         * We do not have to migrate either the id or the addresses.
+ * The id is set by css_register_io_adapter and the addresses
+ * are set based on the IndAddr objects after those get mapped.
+ */
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_adapter_info_so,
+ NULL
+ }
+};
+
+const VMStateDescription vmstate_adapter_routes = {
+ .name = "s390_adapter_routes",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(adapter, AdapterRoutes, 1, vmstate_adapter_info,
+ AdapterInfo),
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
new file mode 100644
index 000000000..efe505418
--- /dev/null
+++ b/hw/intc/s390_flic_kvm.c
@@ -0,0 +1,679 @@
+/*
+ * QEMU S390x KVM floating interrupt controller (flic)
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Jens Freimann <jfrei@linux.vnet.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "kvm/kvm_s390x.h"
+#include <sys/ioctl.h>
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "sysemu/kvm.h"
+#include "hw/s390x/s390_flic.h"
+#include "hw/s390x/adapter.h"
+#include "hw/s390x/css.h"
+#include "migration/qemu-file-types.h"
+#include "trace.h"
+#include "qom/object.h"
+
+#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size
+#define FLIC_FAILED (-1UL)
+#define FLIC_SAVEVM_VERSION 1
+
+struct KVMS390FLICState {
+ S390FLICState parent_obj;
+
+ uint32_t fd;
+ bool clear_io_supported;
+};
+
+static KVMS390FLICState *s390_get_kvm_flic(S390FLICState *fs)
+{
+ static KVMS390FLICState *flic;
+
+ if (!flic) {
+ /* we only have one flic device, so this is fine to cache */
+ flic = KVM_S390_FLIC(fs);
+ }
+ return flic;
+}
+
+/**
+ * flic_get_all_irqs - store all pending irqs in buffer
+ * @buf: pointer to buffer which is passed to kernel
+ * @len: length of buffer
+ * @flic: pointer to flic device state
+ *
+ * Returns: -ENOMEM if buffer is too small,
+ * -EINVAL if attr.group is invalid,
+ * -EFAULT if copying to userspace failed,
+ * on success return number of stored interrupts
+ */
+static int flic_get_all_irqs(KVMS390FLICState *flic,
+ void *buf, int len)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_GET_ALL_IRQS,
+ .addr = (uint64_t) buf,
+ .attr = len,
+ };
+ int rc;
+
+ rc = ioctl(flic->fd, KVM_GET_DEVICE_ATTR, &attr);
+
+ return rc == -1 ? -errno : rc;
+}
+
+static void flic_enable_pfault(KVMS390FLICState *flic)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_APF_ENABLE,
+ };
+ int rc;
+
+ rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+
+ if (rc) {
+ fprintf(stderr, "flic: couldn't enable pfault\n");
+ }
+}
+
+static void flic_disable_wait_pfault(KVMS390FLICState *flic)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_APF_DISABLE_WAIT,
+ };
+ int rc;
+
+ rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+
+ if (rc) {
+ fprintf(stderr, "flic: couldn't disable pfault\n");
+ }
+}
+
+/** flic_enqueue_irqs - returns 0 on success
+ * @buf: pointer to buffer which is passed to kernel
+ * @len: length of buffer
+ * @flic: pointer to flic device state
+ *
+ * Returns: -EINVAL if attr.group is unknown
+ */
+static int flic_enqueue_irqs(void *buf, uint64_t len,
+ KVMS390FLICState *flic)
+{
+ int rc;
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_ENQUEUE,
+ .addr = (uint64_t) buf,
+ .attr = len,
+ };
+
+ rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+
+ return rc ? -errno : 0;
+}
+
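+/*
+ * Try to enqueue the interrupt through the FLIC device; if the kernel
+ * does not support that, fall back to the legacy floating interrupt
+ * ioctl.
+ */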
+static void kvm_s390_inject_flic(S390FLICState *fs, struct kvm_s390_irq *irq)
+{
+ static bool use_flic = true;
+ int r;
+
+ if (use_flic) {
+ r = flic_enqueue_irqs(irq, sizeof(*irq), s390_get_kvm_flic(fs));
+ if (r == -ENOSYS) {
+ use_flic = false;
+ }
+ if (!r) {
+ return;
+ }
+ }
+ /* fallback to legacy KVM IOCTL in case FLIC fails */
+ kvm_s390_floating_interrupt_legacy(irq);
+}
+
+static void kvm_s390_inject_service(S390FLICState *fs, uint32_t parm)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_INT_SERVICE,
+ .u.ext.ext_params = parm,
+ };
+
+ kvm_s390_inject_flic(fs, &irq);
+}
+
+static void kvm_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
+ uint16_t subchannel_nr, uint32_t io_int_parm,
+ uint32_t io_int_word)
+{
+ struct kvm_s390_irq irq = {
+ .u.io.subchannel_id = subchannel_id,
+ .u.io.subchannel_nr = subchannel_nr,
+ .u.io.io_int_parm = io_int_parm,
+ .u.io.io_int_word = io_int_word,
+ };
+
+ if (io_int_word & IO_INT_WORD_AI) {
+ irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
+ } else {
+ irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8,
+ (subchannel_id & 0x0006),
+ subchannel_nr);
+ }
+ kvm_s390_inject_flic(fs, &irq);
+}
+
+static void kvm_s390_inject_crw_mchk(S390FLICState *fs)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_MCHK,
+ .u.mchk.cr14 = CR14_CHANNEL_REPORT_SC,
+ .u.mchk.mcic = s390_build_validity_mcic() | MCIC_SC_CP,
+ };
+
+ kvm_s390_inject_flic(fs, &irq);
+}
+
+static int kvm_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
+ uint16_t subchannel_nr)
+{
+ KVMS390FLICState *flic = s390_get_kvm_flic(fs);
+ int rc;
+ uint32_t sid = subchannel_id << 16 | subchannel_nr;
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_CLEAR_IO_IRQ,
+ .addr = (uint64_t) &sid,
+ .attr = sizeof(sid),
+ };
+ if (unlikely(!flic->clear_io_supported)) {
+ return -ENOSYS;
+ }
+ rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+ return rc ? -errno : 0;
+}
+
+static int kvm_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc,
+ uint16_t mode)
+{
+ KVMS390FLICState *flic = s390_get_kvm_flic(fs);
+ struct kvm_s390_ais_req req = {
+ .isc = isc,
+ .mode = mode,
+ };
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_AISM,
+ .addr = (uint64_t)&req,
+ };
+
+ if (!fs->ais_supported) {
+ return -ENOSYS;
+ }
+
+ return ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0;
+}
+
+static int kvm_s390_inject_airq(S390FLICState *fs, uint8_t type,
+ uint8_t isc, uint8_t flags)
+{
+ KVMS390FLICState *flic = s390_get_kvm_flic(fs);
+ uint32_t id = css_get_adapter_id(type, isc);
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_AIRQ_INJECT,
+ .attr = id,
+ };
+
+ if (!fs->ais_supported) {
+ return -ENOSYS;
+ }
+
+ return ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0;
+}
+
+/**
+ * __get_all_irqs - store all pending irqs in buffer
+ * @flic: pointer to flic device state
+ * @buf: pointer to pointer to a buffer
+ * @len: length of buffer
+ *
+ * Returns: return value of flic_get_all_irqs
+ * Note: Retry with a doubled buffer size until flic_get_all_irqs
+ * either returns a value >= 0 or a negative error code other than
+ * -ENOMEM. -ENOMEM means the buffer is still too small and we should
+ * try again; other negative error codes such as -EFAULT and -EINVAL
+ * are passed through to the caller.
+ */
+static int __get_all_irqs(KVMS390FLICState *flic,
+ void **buf, int len)
+{
+ int r;
+
+ do {
+ /* returns -ENOMEM if buffer is too small and number
+ * of queued interrupts on success */
+ r = flic_get_all_irqs(flic, *buf, len);
+ if (r >= 0) {
+ break;
+ }
+ len *= 2;
+ *buf = g_try_realloc(*buf, len);
+        if (!*buf) {
+ return -ENOMEM;
+ }
+ } while (r == -ENOMEM && len <= KVM_S390_FLIC_MAX_BUFFER);
+
+ return r;
+}
+
+static int kvm_s390_register_io_adapter(S390FLICState *fs, uint32_t id,
+ uint8_t isc, bool swap,
+ bool is_maskable, uint8_t flags)
+{
+ struct kvm_s390_io_adapter adapter = {
+ .id = id,
+ .isc = isc,
+ .maskable = is_maskable,
+ .swap = swap,
+ .flags = flags,
+ };
+ KVMS390FLICState *flic = KVM_S390_FLIC(fs);
+ int r;
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
+ .addr = (uint64_t)&adapter,
+ };
+
+ if (!kvm_gsi_routing_enabled()) {
+ /* nothing to do */
+ return 0;
+ }
+
+ r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+
+ return r ? -errno : 0;
+}
+
+static int kvm_s390_io_adapter_map(S390FLICState *fs, uint32_t id,
+ uint64_t map_addr, bool do_map)
+{
+ struct kvm_s390_io_adapter_req req = {
+ .id = id,
+ .type = do_map ? KVM_S390_IO_ADAPTER_MAP : KVM_S390_IO_ADAPTER_UNMAP,
+ .addr = map_addr,
+ };
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_ADAPTER_MODIFY,
+ .addr = (uint64_t)&req,
+ };
+ KVMS390FLICState *flic = s390_get_kvm_flic(fs);
+ int r;
+
+ if (!kvm_gsi_routing_enabled()) {
+ /* nothing to do */
+ return 0;
+ }
+
+ r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+ return r ? -errno : 0;
+}
+
+static int kvm_s390_add_adapter_routes(S390FLICState *fs,
+ AdapterRoutes *routes)
+{
+ int ret, i;
+ uint64_t ind_offset = routes->adapter.ind_offset;
+
+ if (!kvm_gsi_routing_enabled()) {
+ return -ENOSYS;
+ }
+
+ for (i = 0; i < routes->num_routes; i++) {
+ ret = kvm_irqchip_add_adapter_route(kvm_state, &routes->adapter);
+ if (ret < 0) {
+ goto out_undo;
+ }
+ routes->gsi[i] = ret;
+ routes->adapter.ind_offset++;
+ }
+ kvm_irqchip_commit_routes(kvm_state);
+
+ /* Restore passed-in structure to original state. */
+ routes->adapter.ind_offset = ind_offset;
+ return 0;
+out_undo:
+ while (--i >= 0) {
+ kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
+ routes->gsi[i] = -1;
+ }
+ routes->adapter.ind_offset = ind_offset;
+ return ret;
+}
+
+static void kvm_s390_release_adapter_routes(S390FLICState *fs,
+ AdapterRoutes *routes)
+{
+ int i;
+
+ if (!kvm_gsi_routing_enabled()) {
+ return;
+ }
+
+ for (i = 0; i < routes->num_routes; i++) {
+ if (routes->gsi[i] >= 0) {
+ kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
+ routes->gsi[i] = -1;
+ }
+ }
+}
+
+/**
+ * kvm_flic_save - Save pending floating interrupts
+ * @f: QEMUFile containing migration state
+ * @opaque: pointer to flic device state
+ * @size: ignored
+ *
+ * Note: Pass buf and len to kernel. Start with one page and
+ * increase until the buffer is sufficient or the maximum size is
+ * reached.
+ */
+static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ KVMS390FLICState *flic = opaque;
+ int len = FLIC_SAVE_INITIAL_SIZE;
+ void *buf;
+ int count;
+ int r = 0;
+
+ flic_disable_wait_pfault((struct KVMS390FLICState *) opaque);
+
+ buf = g_try_malloc0(len);
+ if (!buf) {
+ /* Storing FLIC_FAILED into the count field here will cause the
+ * target system to fail when attempting to load irqs from the
+ * migration state */
+ error_report("flic: couldn't allocate memory");
+ qemu_put_be64(f, FLIC_FAILED);
+ return -ENOMEM;
+ }
+
+ count = __get_all_irqs(flic, &buf, len);
+ if (count < 0) {
+ error_report("flic: couldn't retrieve irqs from kernel, rc %d",
+ count);
+ /* Storing FLIC_FAILED into the count field here will cause the
+ * target system to fail when attempting to load irqs from the
+ * migration state */
+ qemu_put_be64(f, FLIC_FAILED);
+ r = count;
+ } else {
+ qemu_put_be64(f, count);
+ qemu_put_buffer(f, (uint8_t *) buf,
+ count * sizeof(struct kvm_s390_irq));
+ }
+ g_free(buf);
+
+ return r;
+}
+
+/**
+ * kvm_flic_load - Load pending floating interrupts
+ * @f: QEMUFile containing migration state
+ * @opaque: pointer to flic device state
+ * @size: ignored
+ *
+ * Returns: value of flic_enqueue_irqs, -EINVAL on error
+ * Note: Do nothing when no interrupts were stored
+ * in QEMUFile
+ */
+static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ uint64_t len = 0;
+ uint64_t count = 0;
+ void *buf = NULL;
+ int r = 0;
+
+ flic_enable_pfault((struct KVMS390FLICState *) opaque);
+
+ count = qemu_get_be64(f);
+ len = count * sizeof(struct kvm_s390_irq);
+ if (count == FLIC_FAILED) {
+ return -EINVAL;
+ }
+ if (count == 0) {
+ return 0;
+ }
+ buf = g_try_malloc0(len);
+ if (!buf) {
+ return -ENOMEM;
+ }
+
+ if (qemu_get_buffer(f, (uint8_t *) buf, len) != len) {
+ r = -EINVAL;
+ goto out_free;
+ }
+ r = flic_enqueue_irqs(buf, len, (struct KVMS390FLICState *) opaque);
+
+out_free:
+ g_free(buf);
+ return r;
+}
+
+typedef struct KVMS390FLICStateMigTmp {
+ KVMS390FLICState *parent;
+ uint8_t simm;
+ uint8_t nimm;
+} KVMS390FLICStateMigTmp;
+
+static int kvm_flic_ais_pre_save(void *opaque)
+{
+ KVMS390FLICStateMigTmp *tmp = opaque;
+ KVMS390FLICState *flic = tmp->parent;
+ struct kvm_s390_ais_all ais;
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_AISM_ALL,
+ .addr = (uint64_t)&ais,
+ .attr = sizeof(ais),
+ };
+
+ if (ioctl(flic->fd, KVM_GET_DEVICE_ATTR, &attr)) {
+ error_report("Failed to retrieve kvm flic ais states");
+ return -EINVAL;
+ }
+
+ tmp->simm = ais.simm;
+ tmp->nimm = ais.nimm;
+
+ return 0;
+}
+
+static int kvm_flic_ais_post_load(void *opaque, int version_id)
+{
+ KVMS390FLICStateMigTmp *tmp = opaque;
+ KVMS390FLICState *flic = tmp->parent;
+ struct kvm_s390_ais_all ais = {
+ .simm = tmp->simm,
+ .nimm = tmp->nimm,
+ };
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_AISM_ALL,
+ .addr = (uint64_t)&ais,
+ };
+
+    /* This can happen when the user misconfigures their guests in an
+     * incompatible fashion or without a CPU model. For example, using
+     * qemu with -cpu host (which is not migration safe) and migrating
+     * from a host that has AIS to a host that has no AIS.
+     * In that case the target system will reject the migration here.
+ */
+ if (!ais_needed(flic)) {
+ return -ENOSYS;
+ }
+
+ return ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0;
+}
+
+static const VMStateDescription kvm_s390_flic_ais_tmp = {
+ .name = "s390-flic-ais-tmp",
+ .pre_save = kvm_flic_ais_pre_save,
+ .post_load = kvm_flic_ais_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(simm, KVMS390FLICStateMigTmp),
+ VMSTATE_UINT8(nimm, KVMS390FLICStateMigTmp),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription kvm_s390_flic_vmstate_ais = {
+ .name = "s390-flic/ais",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ais_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_WITH_TMP(KVMS390FLICState, KVMS390FLICStateMigTmp,
+ kvm_s390_flic_ais_tmp),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription kvm_s390_flic_vmstate = {
+    /* The name should have been "kvm-s390-flic", but we cannot
+     * change it without breaking migration compatibility */
+ .name = "s390-flic",
+ .version_id = FLIC_SAVEVM_VERSION,
+ .minimum_version_id = FLIC_SAVEVM_VERSION,
+ .fields = (VMStateField[]) {
+ {
+ .name = "irqs",
+ .info = &(const VMStateInfo) {
+ .name = "irqs",
+ .get = kvm_flic_load,
+ .put = kvm_flic_save,
+ },
+ .flags = VMS_SINGLE,
+ },
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &kvm_s390_flic_vmstate_ais,
+ NULL
+ }
+};
+
+struct KVMS390FLICStateClass {
+ S390FLICStateClass parent_class;
+ DeviceRealize parent_realize;
+};
+typedef struct KVMS390FLICStateClass KVMS390FLICStateClass;
+
+DECLARE_CLASS_CHECKERS(KVMS390FLICStateClass, KVM_S390_FLIC,
+ TYPE_KVM_S390_FLIC)
+
+
+static void kvm_s390_flic_realize(DeviceState *dev, Error **errp)
+{
+ KVMS390FLICState *flic_state = KVM_S390_FLIC(dev);
+ struct kvm_create_device cd = {0};
+ struct kvm_device_attr test_attr = {0};
+ int ret;
+ Error *err = NULL;
+
+ KVM_S390_FLIC_GET_CLASS(dev)->parent_realize(dev, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ flic_state->fd = -1;
+
+ cd.type = KVM_DEV_TYPE_FLIC;
+ ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd);
+ if (ret < 0) {
+ error_setg_errno(errp, errno, "Creating the KVM device failed");
+ trace_flic_create_device(errno);
+ return;
+ }
+ flic_state->fd = cd.fd;
+
+ /* Check clear_io_irq support */
+ test_attr.group = KVM_DEV_FLIC_CLEAR_IO_IRQ;
+ flic_state->clear_io_supported = !ioctl(flic_state->fd,
+ KVM_HAS_DEVICE_ATTR, test_attr);
+}
+
+static void kvm_s390_flic_reset(DeviceState *dev)
+{
+ KVMS390FLICState *flic = KVM_S390_FLIC(dev);
+ S390FLICState *fs = S390_FLIC_COMMON(dev);
+ struct kvm_device_attr attr = {
+ .group = KVM_DEV_FLIC_CLEAR_IRQS,
+ };
+ int rc = 0;
+ uint8_t isc;
+
+ if (flic->fd == -1) {
+ return;
+ }
+
+ flic_disable_wait_pfault(flic);
+
+ if (fs->ais_supported) {
+ for (isc = 0; isc <= MAX_ISC; isc++) {
+ rc = kvm_s390_modify_ais_mode(fs, isc, SIC_IRQ_MODE_ALL);
+ if (rc) {
+ error_report("Failed to reset ais mode for isc %d: %s",
+ isc, strerror(-rc));
+ }
+ }
+ }
+
+ rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+ if (rc) {
+ trace_flic_reset_failed(errno);
+ }
+
+ flic_enable_pfault(flic);
+}
+
+static void kvm_s390_flic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc);
+
+ KVM_S390_FLIC_CLASS(oc)->parent_realize = dc->realize;
+ dc->realize = kvm_s390_flic_realize;
+ dc->vmsd = &kvm_s390_flic_vmstate;
+ dc->reset = kvm_s390_flic_reset;
+ fsc->register_io_adapter = kvm_s390_register_io_adapter;
+ fsc->io_adapter_map = kvm_s390_io_adapter_map;
+ fsc->add_adapter_routes = kvm_s390_add_adapter_routes;
+ fsc->release_adapter_routes = kvm_s390_release_adapter_routes;
+ fsc->clear_io_irq = kvm_s390_clear_io_flic;
+ fsc->modify_ais_mode = kvm_s390_modify_ais_mode;
+ fsc->inject_airq = kvm_s390_inject_airq;
+ fsc->inject_service = kvm_s390_inject_service;
+ fsc->inject_io = kvm_s390_inject_io;
+ fsc->inject_crw_mchk = kvm_s390_inject_crw_mchk;
+}
+
+static const TypeInfo kvm_s390_flic_info = {
+ .name = TYPE_KVM_S390_FLIC,
+ .parent = TYPE_S390_FLIC_COMMON,
+ .instance_size = sizeof(KVMS390FLICState),
+ .class_size = sizeof(KVMS390FLICStateClass),
+ .class_init = kvm_s390_flic_class_init,
+};
+
+static void kvm_s390_flic_register_types(void)
+{
+ type_register_static(&kvm_s390_flic_info);
+}
+
+type_init(kvm_s390_flic_register_types)
diff --git a/hw/intc/sh_intc.c b/hw/intc/sh_intc.c
new file mode 100644
index 000000000..c9b0b0c1e
--- /dev/null
+++ b/hw/intc/sh_intc.c
@@ -0,0 +1,449 @@
+/*
+ * SuperH interrupt controller module
+ *
+ * Copyright (c) 2007 Magnus Damm
+ * Based on sh_timer.c and arm_timer.c by Paul Brook
+ * Copyright (c) 2005-2006 CodeSourcery.
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "hw/sh4/sh_intc.h"
+#include "hw/irq.h"
+#include "hw/sh4/sh.h"
+#include "trace.h"
+
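+/*
+ * Adjust the enable and assertion state of a single interrupt source.
+ * A source becomes pending once it is asserted and fully enabled
+ * (enable_count == enable_max); the parent controller's pending count
+ * drives the CPU interrupt line.
+ */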
+void sh_intc_toggle_source(struct intc_source *source,
+ int enable_adj, int assert_adj)
+{
+ int enable_changed = 0;
+ int pending_changed = 0;
+ int old_pending;
+
+ if (source->enable_count == source->enable_max && enable_adj == -1) {
+ enable_changed = -1;
+ }
+ source->enable_count += enable_adj;
+
+ if (source->enable_count == source->enable_max) {
+ enable_changed = 1;
+ }
+ source->asserted += assert_adj;
+
+ old_pending = source->pending;
+ source->pending = source->asserted &&
+ (source->enable_count == source->enable_max);
+
+ if (old_pending != source->pending) {
+ pending_changed = 1;
+ }
+ if (pending_changed) {
+ if (source->pending) {
+ source->parent->pending++;
+ if (source->parent->pending == 1) {
+ cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD);
+ }
+ } else {
+ source->parent->pending--;
+ if (source->parent->pending == 0) {
+ cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD);
+ }
+ }
+ }
+
+ if (enable_changed || assert_adj || pending_changed) {
+ trace_sh_intc_sources(source->parent->pending, source->asserted,
+ source->enable_count, source->enable_max,
+ source->vect, source->asserted ? "asserted " :
+ assert_adj ? "deasserted" : "",
+ enable_changed == 1 ? "enabled " :
+ enable_changed == -1 ? "disabled " : "",
+ source->pending ? "pending" : "");
+ }
+}
+
+static void sh_intc_set_irq(void *opaque, int n, int level)
+{
+ struct intc_desc *desc = opaque;
+ struct intc_source *source = &desc->sources[n];
+
+ if (level && !source->asserted) {
+ sh_intc_toggle_source(source, 0, 1);
+ } else if (!level && source->asserted) {
+ sh_intc_toggle_source(source, 0, -1);
+ }
+}
+
+int sh_intc_get_pending_vector(struct intc_desc *desc, int imask)
+{
+ unsigned int i;
+
+    /* slow: use a linked list of pending sources instead */
+ /* wrong: take interrupt priority into account (one list per priority) */
+
+ if (imask == 0x0f) {
+ return -1; /* FIXME, update code to include priority per source */
+ }
+
+ for (i = 0; i < desc->nr_sources; i++) {
+ struct intc_source *source = &desc->sources[i];
+
+ if (source->pending) {
+ trace_sh_intc_pending(desc->pending, source->vect);
+ return source->vect;
+ }
+ }
+ g_assert_not_reached();
+}
+
+typedef enum {
+ INTC_MODE_NONE,
+ INTC_MODE_DUAL_SET,
+ INTC_MODE_DUAL_CLR,
+ INTC_MODE_ENABLE_REG,
+ INTC_MODE_MASK_REG,
+} SHIntCMode;
+#define INTC_MODE_IS_PRIO 0x80
+
+static SHIntCMode sh_intc_mode(unsigned long address, unsigned long set_reg,
+ unsigned long clr_reg)
+{
+ if (address != A7ADDR(set_reg) && address != A7ADDR(clr_reg)) {
+ return INTC_MODE_NONE;
+ }
+ if (set_reg && clr_reg) {
+ return address == A7ADDR(set_reg) ?
+ INTC_MODE_DUAL_SET : INTC_MODE_DUAL_CLR;
+ }
+ return set_reg ? INTC_MODE_ENABLE_REG : INTC_MODE_MASK_REG;
+}
+
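+/*
+ * Map a register address onto the matching mask or priority register.
+ * The out parameters receive the register value pointer, its enum_id
+ * table, the highest field index, the field width in bits and the
+ * access mode; an unknown address is a programming error.
+ */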
+static void sh_intc_locate(struct intc_desc *desc,
+ unsigned long address,
+ unsigned long **datap,
+ intc_enum **enums,
+ unsigned int *first,
+ unsigned int *width,
+ unsigned int *modep)
+{
+ SHIntCMode mode;
+ unsigned int i;
+
+ /* this is slow but works for now */
+
+ if (desc->mask_regs) {
+ for (i = 0; i < desc->nr_mask_regs; i++) {
+ struct intc_mask_reg *mr = &desc->mask_regs[i];
+
+ mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg);
+ if (mode != INTC_MODE_NONE) {
+ *modep = mode;
+ *datap = &mr->value;
+ *enums = mr->enum_ids;
+ *first = mr->reg_width - 1;
+ *width = 1;
+ return;
+ }
+ }
+ }
+
+ if (desc->prio_regs) {
+ for (i = 0; i < desc->nr_prio_regs; i++) {
+ struct intc_prio_reg *pr = &desc->prio_regs[i];
+
+ mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg);
+ if (mode != INTC_MODE_NONE) {
+ *modep = mode | INTC_MODE_IS_PRIO;
+ *datap = &pr->value;
+ *enums = pr->enum_ids;
+ *first = pr->reg_width / pr->field_width - 1;
+ *width = pr->field_width;
+ return;
+ }
+ }
+ }
+ g_assert_not_reached();
+}
+
+static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id,
+ int enable, int is_group)
+{
+ struct intc_source *source = &desc->sources[id];
+
+ if (!id) {
+ return;
+ }
+ if (!source->next_enum_id && (!source->enable_max || !source->vect)) {
+ qemu_log_mask(LOG_UNIMP,
+ "sh_intc: reserved interrupt source %d modified\n", id);
+ return;
+ }
+
+ if (source->vect) {
+ sh_intc_toggle_source(source, enable ? 1 : -1, 0);
+ }
+
+ if ((is_group || !source->vect) && source->next_enum_id) {
+ sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1);
+ }
+
+ if (!source->vect) {
+ trace_sh_intc_set(id, !!enable);
+ }
+}
+
+static uint64_t sh_intc_read(void *opaque, hwaddr offset, unsigned size)
+{
+ struct intc_desc *desc = opaque;
+ intc_enum *enum_ids;
+ unsigned int first;
+ unsigned int width;
+ unsigned int mode;
+ unsigned long *valuep;
+
+ sh_intc_locate(desc, (unsigned long)offset, &valuep,
+ &enum_ids, &first, &width, &mode);
+ trace_sh_intc_read(size, (uint64_t)offset, *valuep);
+ return *valuep;
+}
+
+static void sh_intc_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ struct intc_desc *desc = opaque;
+ intc_enum *enum_ids;
+ unsigned int first;
+ unsigned int width;
+ unsigned int mode;
+ unsigned long *valuep;
+ unsigned int k;
+ unsigned long mask;
+
+ trace_sh_intc_write(size, (uint64_t)offset, value);
+ sh_intc_locate(desc, (unsigned long)offset, &valuep,
+ &enum_ids, &first, &width, &mode);
+ switch (mode) {
+ case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO:
+ break;
+ case INTC_MODE_DUAL_SET:
+ value |= *valuep;
+ break;
+ case INTC_MODE_DUAL_CLR:
+ value = *valuep & ~value;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ for (k = 0; k <= first; k++) {
+ mask = (1 << width) - 1;
+ mask <<= (first - k) * width;
+
+ if ((*valuep & mask) != (value & mask)) {
+ sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0);
+ }
+ }
+
+ *valuep = value;
+}
+
+static const MemoryRegionOps sh_intc_ops = {
+ .read = sh_intc_read,
+ .write = sh_intc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void sh_intc_register_source(struct intc_desc *desc,
+ intc_enum source,
+ struct intc_group *groups,
+ int nr_groups)
+{
+ unsigned int i, k;
+ intc_enum id;
+
+ if (desc->mask_regs) {
+ for (i = 0; i < desc->nr_mask_regs; i++) {
+ struct intc_mask_reg *mr = &desc->mask_regs[i];
+
+ for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) {
+ id = mr->enum_ids[k];
+ if (id && id == source) {
+ desc->sources[id].enable_max++;
+ }
+ }
+ }
+ }
+
+ if (desc->prio_regs) {
+ for (i = 0; i < desc->nr_prio_regs; i++) {
+ struct intc_prio_reg *pr = &desc->prio_regs[i];
+
+ for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) {
+ id = pr->enum_ids[k];
+ if (id && id == source) {
+ desc->sources[id].enable_max++;
+ }
+ }
+ }
+ }
+
+ if (groups) {
+ for (i = 0; i < nr_groups; i++) {
+ struct intc_group *gr = &groups[i];
+
+ for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) {
+ id = gr->enum_ids[k];
+ if (id && id == source) {
+ desc->sources[id].enable_max++;
+ }
+ }
+ }
+ }
+
+}
+
+void sh_intc_register_sources(struct intc_desc *desc,
+ struct intc_vect *vectors,
+ int nr_vectors,
+ struct intc_group *groups,
+ int nr_groups)
+{
+ unsigned int i, k;
+ intc_enum id;
+ struct intc_source *s;
+
+ for (i = 0; i < nr_vectors; i++) {
+ struct intc_vect *vect = &vectors[i];
+
+ sh_intc_register_source(desc, vect->enum_id, groups, nr_groups);
+ id = vect->enum_id;
+ if (id) {
+ s = &desc->sources[id];
+ s->vect = vect->vect;
+ trace_sh_intc_register("source", vect->enum_id, s->vect,
+ s->enable_count, s->enable_max);
+ }
+ }
+
+ if (groups) {
+ for (i = 0; i < nr_groups; i++) {
+ struct intc_group *gr = &groups[i];
+
+ id = gr->enum_id;
+ s = &desc->sources[id];
+ s->next_enum_id = gr->enum_ids[0];
+
+ for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) {
+ if (gr->enum_ids[k]) {
+ id = gr->enum_ids[k - 1];
+ s = &desc->sources[id];
+ s->next_enum_id = gr->enum_ids[k];
+ }
+ }
+ trace_sh_intc_register("group", gr->enum_id, 0xffff,
+ s->enable_count, s->enable_max);
+ }
+ }
+}
+
+static unsigned int sh_intc_register(MemoryRegion *sysmem,
+ struct intc_desc *desc,
+ const unsigned long address,
+ const char *type,
+ const char *action,
+ const unsigned int index)
+{
+ char name[60];
+ MemoryRegion *iomem, *iomem_p4, *iomem_a7;
+
+ if (!address) {
+ return 0;
+ }
+
+ iomem = &desc->iomem;
+ iomem_p4 = &desc->iomem_aliases[index];
+ iomem_a7 = iomem_p4 + 1;
+
+ snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "p4");
+ memory_region_init_alias(iomem_p4, NULL, name, iomem, A7ADDR(address), 4);
+ memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4);
+
+ snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "a7");
+ memory_region_init_alias(iomem_a7, NULL, name, iomem, A7ADDR(address), 4);
+ memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7);
+
+    /* number of aliases registered, used to advance the aliases index */
+ return 2;
+}
+
+int sh_intc_init(MemoryRegion *sysmem,
+ struct intc_desc *desc,
+ int nr_sources,
+ struct intc_mask_reg *mask_regs,
+ int nr_mask_regs,
+ struct intc_prio_reg *prio_regs,
+ int nr_prio_regs)
+{
+ unsigned int i, j;
+
+ desc->pending = 0;
+ desc->nr_sources = nr_sources;
+ desc->mask_regs = mask_regs;
+ desc->nr_mask_regs = nr_mask_regs;
+ desc->prio_regs = prio_regs;
+ desc->nr_prio_regs = nr_prio_regs;
+ /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases) */
+ desc->iomem_aliases = g_new0(MemoryRegion,
+ (nr_mask_regs + nr_prio_regs) * 4);
+ desc->sources = g_new0(struct intc_source, nr_sources);
+ for (i = 0; i < nr_sources; i++) {
+ desc->sources[i].parent = desc;
+ }
+ desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources);
+ memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, "intc",
+ 0x100000000ULL);
+ j = 0;
+ if (desc->mask_regs) {
+ for (i = 0; i < desc->nr_mask_regs; i++) {
+ struct intc_mask_reg *mr = &desc->mask_regs[i];
+
+ j += sh_intc_register(sysmem, desc, mr->set_reg, "mask", "set", j);
+ j += sh_intc_register(sysmem, desc, mr->clr_reg, "mask", "clr", j);
+ }
+ }
+
+ if (desc->prio_regs) {
+ for (i = 0; i < desc->nr_prio_regs; i++) {
+ struct intc_prio_reg *pr = &desc->prio_regs[i];
+
+ j += sh_intc_register(sysmem, desc, pr->set_reg, "prio", "set", j);
+ j += sh_intc_register(sysmem, desc, pr->clr_reg, "prio", "clr", j);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Assert level <n> IRL interrupt.
+ * 0:deassert. 1:lowest priority,... 15:highest priority
+ */
+void sh_intc_set_irl(void *opaque, int n, int level)
+{
+ struct intc_source *s = opaque;
+ int i, irl = level ^ 15;
+ intc_enum id = s->next_enum_id;
+
+ for (i = 0; id; id = s->next_enum_id, i++) {
+ s = &s->parent->sources[id];
+ if (i == irl) {
+ sh_intc_toggle_source(s, s->enable_count ? 0 : 1,
+ s->asserted ? 0 : 1);
+ } else if (s->asserted) {
+ sh_intc_toggle_source(s, 0, -1);
+ }
+ }
+}
diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c
new file mode 100644
index 000000000..877e76877
--- /dev/null
+++ b/hw/intc/sifive_plic.c
@@ -0,0 +1,563 @@
+/*
+ * SiFive PLIC (Platform Level Interrupt Controller)
+ *
+ * Copyright (c) 2017 SiFive, Inc.
+ *
+ * This provides a parameterizable interrupt controller based on SiFive's PLIC.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qemu/error-report.h"
+#include "hw/sysbus.h"
+#include "hw/pci/msi.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/sifive_plic.h"
+#include "target/riscv/cpu.h"
+#include "migration/vmstate.h"
+#include "hw/irq.h"
+
+#define RISCV_DEBUG_PLIC 0
+
+static PLICMode char_to_mode(char c)
+{
+ switch (c) {
+ case 'U': return PLICMode_U;
+ case 'S': return PLICMode_S;
+ case 'H': return PLICMode_H;
+ case 'M': return PLICMode_M;
+ default:
+ error_report("plic: invalid mode '%c'", c);
+ exit(1);
+ }
+}
+
+static char mode_to_char(PLICMode m)
+{
+ switch (m) {
+ case PLICMode_U: return 'U';
+ case PLICMode_S: return 'S';
+ case PLICMode_H: return 'H';
+ case PLICMode_M: return 'M';
+ default: return '?';
+ }
+}
+
+static void sifive_plic_print_state(SiFivePLICState *plic)
+{
+ int i;
+ int addrid;
+
+ /* pending */
+ qemu_log("pending : ");
+ for (i = plic->bitfield_words - 1; i >= 0; i--) {
+ qemu_log("%08x", plic->pending[i]);
+ }
+ qemu_log("\n");
+
+    /* claimed */
+ qemu_log("claimed : ");
+ for (i = plic->bitfield_words - 1; i >= 0; i--) {
+ qemu_log("%08x", plic->claimed[i]);
+ }
+ qemu_log("\n");
+
+ for (addrid = 0; addrid < plic->num_addrs; addrid++) {
+ qemu_log("hart%d-%c enable: ",
+ plic->addr_config[addrid].hartid,
+ mode_to_char(plic->addr_config[addrid].mode));
+ for (i = plic->bitfield_words - 1; i >= 0; i--) {
+ qemu_log("%08x", plic->enable[addrid * plic->bitfield_words + i]);
+ }
+ qemu_log("\n");
+ }
+}
+
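+/*
+ * Lock-free read-modify-write of the bits selected by @mask, implemented
+ * as a compare-and-swap loop; returns the previous value of the word.
+ */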
+static uint32_t atomic_set_masked(uint32_t *a, uint32_t mask, uint32_t value)
+{
+ uint32_t old, new, cmp = qatomic_read(a);
+
+ do {
+ old = cmp;
+ new = (old & ~mask) | (value & mask);
+ cmp = qatomic_cmpxchg(a, old, new);
+ } while (old != cmp);
+
+ return old;
+}
+
+static void sifive_plic_set_pending(SiFivePLICState *plic, int irq, bool level)
+{
+ atomic_set_masked(&plic->pending[irq >> 5], 1 << (irq & 31), -!!level);
+}
+
+static void sifive_plic_set_claimed(SiFivePLICState *plic, int irq, bool level)
+{
+ atomic_set_masked(&plic->claimed[irq >> 5], 1 << (irq & 31), -!!level);
+}
+
+static int sifive_plic_irqs_pending(SiFivePLICState *plic, uint32_t addrid)
+{
+ int i, j;
+ for (i = 0; i < plic->bitfield_words; i++) {
+ uint32_t pending_enabled_not_claimed =
+ (plic->pending[i] & ~plic->claimed[i]) &
+ plic->enable[addrid * plic->bitfield_words + i];
+ if (!pending_enabled_not_claimed) {
+ continue;
+ }
+ for (j = 0; j < 32; j++) {
+ int irq = (i << 5) + j;
+ uint32_t prio = plic->source_priority[irq];
+ int enabled = pending_enabled_not_claimed & (1 << j);
+ if (enabled && prio > plic->target_priority[addrid]) {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static void sifive_plic_update(SiFivePLICState *plic)
+{
+ int addrid;
+
+ /* raise irq on harts where this irq is enabled */
+ for (addrid = 0; addrid < plic->num_addrs; addrid++) {
+ uint32_t hartid = plic->addr_config[addrid].hartid;
+ PLICMode mode = plic->addr_config[addrid].mode;
+ int level = sifive_plic_irqs_pending(plic, addrid);
+
+ switch (mode) {
+ case PLICMode_M:
+ qemu_set_irq(plic->m_external_irqs[hartid - plic->hartid_base], level);
+ break;
+ case PLICMode_S:
+ qemu_set_irq(plic->s_external_irqs[hartid - plic->hartid_base], level);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (RISCV_DEBUG_PLIC) {
+ sifive_plic_print_state(plic);
+ }
+}
+
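+/*
+ * Claim the highest-priority interrupt that is pending, enabled for this
+ * target and not yet claimed, provided its priority exceeds the target's
+ * threshold. The winner has its pending bit cleared and its claimed bit
+ * set; a return value of 0 means nothing was claimable.
+ */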
+static uint32_t sifive_plic_claim(SiFivePLICState *plic, uint32_t addrid)
+{
+ int i, j;
+ uint32_t max_irq = 0;
+ uint32_t max_prio = plic->target_priority[addrid];
+
+ for (i = 0; i < plic->bitfield_words; i++) {
+ uint32_t pending_enabled_not_claimed =
+ (plic->pending[i] & ~plic->claimed[i]) &
+ plic->enable[addrid * plic->bitfield_words + i];
+ if (!pending_enabled_not_claimed) {
+ continue;
+ }
+ for (j = 0; j < 32; j++) {
+ int irq = (i << 5) + j;
+ uint32_t prio = plic->source_priority[irq];
+ int enabled = pending_enabled_not_claimed & (1 << j);
+ if (enabled && prio > max_prio) {
+ max_irq = irq;
+ max_prio = prio;
+ }
+ }
+ }
+
+ if (max_irq) {
+ sifive_plic_set_pending(plic, max_irq, false);
+ sifive_plic_set_claimed(plic, max_irq, true);
+ }
+ return max_irq;
+}
+
+static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size)
+{
+ SiFivePLICState *plic = opaque;
+
+    /* reads must be 4 byte words */
+ if ((addr & 0x3) != 0) {
+ goto err;
+ }
+
+ if (addr >= plic->priority_base && /* 4 bytes per source */
+ addr < plic->priority_base + (plic->num_sources << 2))
+ {
+ uint32_t irq = ((addr - plic->priority_base) >> 2) + 1;
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: read priority: irq=%d priority=%d\n",
+ irq, plic->source_priority[irq]);
+ }
+ return plic->source_priority[irq];
+ } else if (addr >= plic->pending_base && /* 1 bit per source */
+ addr < plic->pending_base + (plic->num_sources >> 3))
+ {
+ uint32_t word = (addr - plic->pending_base) >> 2;
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: read pending: word=%d value=%d\n",
+ word, plic->pending[word]);
+ }
+ return plic->pending[word];
+ } else if (addr >= plic->enable_base && /* 1 bit per source */
+ addr < plic->enable_base + plic->num_addrs * plic->enable_stride)
+ {
+ uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride;
+ uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2;
+ if (wordid < plic->bitfield_words) {
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: read enable: hart%d-%c word=%d value=%x\n",
+ plic->addr_config[addrid].hartid,
+ mode_to_char(plic->addr_config[addrid].mode), wordid,
+ plic->enable[addrid * plic->bitfield_words + wordid]);
+ }
+ return plic->enable[addrid * plic->bitfield_words + wordid];
+ }
+    } else if (addr >= plic->context_base && /* 4 bytes per reg */
+ addr < plic->context_base + plic->num_addrs * plic->context_stride)
+ {
+ uint32_t addrid = (addr - plic->context_base) / plic->context_stride;
+ uint32_t contextid = (addr & (plic->context_stride - 1));
+ if (contextid == 0) {
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: read priority: hart%d-%c priority=%x\n",
+ plic->addr_config[addrid].hartid,
+ mode_to_char(plic->addr_config[addrid].mode),
+ plic->target_priority[addrid]);
+ }
+ return plic->target_priority[addrid];
+ } else if (contextid == 4) {
+ uint32_t value = sifive_plic_claim(plic, addrid);
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: read claim: hart%d-%c irq=%x\n",
+ plic->addr_config[addrid].hartid,
+ mode_to_char(plic->addr_config[addrid].mode),
+ value);
+ }
+ sifive_plic_update(plic);
+ return value;
+ }
+ }
+
+err:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid register read 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ return 0;
+}
+
+static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ SiFivePLICState *plic = opaque;
+
+ /* writes must be 4 byte words */
+ if ((addr & 0x3) != 0) {
+ goto err;
+ }
+
+ if (addr >= plic->priority_base && /* 4 bytes per source */
+ addr < plic->priority_base + (plic->num_sources << 2))
+ {
+ uint32_t irq = ((addr - plic->priority_base) >> 2) + 1;
+ plic->source_priority[irq] = value & 7;
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: write priority: irq=%d priority=%d\n",
+ irq, plic->source_priority[irq]);
+ }
+ sifive_plic_update(plic);
+ return;
+ } else if (addr >= plic->pending_base && /* 1 bit per source */
+ addr < plic->pending_base + (plic->num_sources >> 3))
+ {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid pending write: 0x%" HWADDR_PRIx "",
+ __func__, addr);
+ return;
+ } else if (addr >= plic->enable_base && /* 1 bit per source */
+ addr < plic->enable_base + plic->num_addrs * plic->enable_stride)
+ {
+ uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride;
+ uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2;
+ if (wordid < plic->bitfield_words) {
+ plic->enable[addrid * plic->bitfield_words + wordid] = value;
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: write enable: hart%d-%c word=%d value=%x\n",
+ plic->addr_config[addrid].hartid,
+ mode_to_char(plic->addr_config[addrid].mode), wordid,
+ plic->enable[addrid * plic->bitfield_words + wordid]);
+ }
+ return;
+ }
+ } else if (addr >= plic->context_base && /* 4 bytes per reg */
+ addr < plic->context_base + plic->num_addrs * plic->context_stride)
+ {
+ uint32_t addrid = (addr - plic->context_base) / plic->context_stride;
+ uint32_t contextid = (addr & (plic->context_stride - 1));
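+        /*
+         * Offset 0 sets the priority threshold; a write to offset 4
+         * completes (releases) a previously claimed interrupt.
+         */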
+ if (contextid == 0) {
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: write priority: hart%d-%c priority=%x\n",
+ plic->addr_config[addrid].hartid,
+ mode_to_char(plic->addr_config[addrid].mode),
+ plic->target_priority[addrid]);
+ }
+ if (value <= plic->num_priorities) {
+ plic->target_priority[addrid] = value;
+ sifive_plic_update(plic);
+ }
+ return;
+ } else if (contextid == 4) {
+ if (RISCV_DEBUG_PLIC) {
+ qemu_log("plic: write claim: hart%d-%c irq=%x\n",
+ plic->addr_config[addrid].hartid,
+ mode_to_char(plic->addr_config[addrid].mode),
+ (uint32_t)value);
+ }
+ if (value < plic->num_sources) {
+ sifive_plic_set_claimed(plic, value, false);
+ sifive_plic_update(plic);
+ }
+ return;
+ }
+ }
+
+err:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+}
+
+static const MemoryRegionOps sifive_plic_ops = {
+ .read = sifive_plic_read,
+ .write = sifive_plic_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4
+ }
+};
+
+/*
+ * parse PLIC hart/mode address offset config
+ *
+ * "M" 1 hart with M mode
+ * "MS,MS" 2 harts, 0-1 with M and S mode
+ * "M,MS,MS,MS,MS" 5 harts, 0 with M mode, 1-5 with M and S mode
+ */
+static void parse_hart_config(SiFivePLICState *plic)
+{
+ int addrid, hartid, modes;
+ const char *p;
+ char c;
+
+ /* count and validate hart/mode combinations */
+ addrid = 0, hartid = 0, modes = 0;
+ p = plic->hart_config;
+ while ((c = *p++)) {
+ if (c == ',') {
+ addrid += ctpop8(modes);
+ modes = 0;
+ hartid++;
+ } else {
+ int m = 1 << char_to_mode(c);
+ if (modes == (modes | m)) {
+ error_report("plic: duplicate mode '%c' in config: %s",
+ c, plic->hart_config);
+ exit(1);
+ }
+ modes |= m;
+ }
+ }
+ if (modes) {
+ addrid += ctpop8(modes);
+ }
+ hartid++;
+
+ plic->num_addrs = addrid;
+ plic->num_harts = hartid;
+
+ /* store hart/mode combinations */
+ plic->addr_config = g_new(PLICAddr, plic->num_addrs);
+ addrid = 0, hartid = plic->hartid_base;
+ p = plic->hart_config;
+ while ((c = *p++)) {
+ if (c == ',') {
+ hartid++;
+ } else {
+ plic->addr_config[addrid].addrid = addrid;
+ plic->addr_config[addrid].hartid = hartid;
+ plic->addr_config[addrid].mode = char_to_mode(c);
+ addrid++;
+ }
+ }
+}
+
+static void sifive_plic_irq_request(void *opaque, int irq, int level)
+{
+ SiFivePLICState *s = opaque;
+
+ sifive_plic_set_pending(s, irq, level > 0);
+ sifive_plic_update(s);
+}
+
+static void sifive_plic_realize(DeviceState *dev, Error **errp)
+{
+ SiFivePLICState *s = SIFIVE_PLIC(dev);
+ int i;
+
+ memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_plic_ops, s,
+ TYPE_SIFIVE_PLIC, s->aperture_size);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio);
+
+ parse_hart_config(s);
+
+ s->bitfield_words = (s->num_sources + 31) >> 5;
+ s->num_enables = s->bitfield_words * s->num_addrs;
+ s->source_priority = g_new0(uint32_t, s->num_sources);
+ s->target_priority = g_new(uint32_t, s->num_addrs);
+ s->pending = g_new0(uint32_t, s->bitfield_words);
+ s->claimed = g_new0(uint32_t, s->bitfield_words);
+ s->enable = g_new0(uint32_t, s->num_enables);
+
+ qdev_init_gpio_in(dev, sifive_plic_irq_request, s->num_sources);
+
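+    /* GPIO outputs: the S-mode external IRQ lines come first, then M-mode */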
+ s->s_external_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts);
+ qdev_init_gpio_out(dev, s->s_external_irqs, s->num_harts);
+
+ s->m_external_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts);
+ qdev_init_gpio_out(dev, s->m_external_irqs, s->num_harts);
+
+    /*
+     * We can't allow the supervisor to control SEIP as this would allow the
+     * supervisor to clear a pending external interrupt, which would result
+     * in a lost interrupt when a PLIC is attached. The SEIP bit must be
+     * hardware controlled when a PLIC is attached.
+     */
+ for (i = 0; i < s->num_harts; i++) {
+ RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i));
+ if (riscv_cpu_claim_interrupts(cpu, MIP_SEIP) < 0) {
+ error_report("SEIP already claimed");
+ exit(1);
+ }
+ }
+
+ msi_nonbroken = true;
+}
+
+static const VMStateDescription vmstate_sifive_plic = {
+ .name = "riscv_sifive_plic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_VARRAY_UINT32(source_priority, SiFivePLICState,
+ num_sources, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(target_priority, SiFivePLICState,
+ num_addrs, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(pending, SiFivePLICState, bitfield_words, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(claimed, SiFivePLICState, bitfield_words, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(enable, SiFivePLICState, num_enables, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property sifive_plic_properties[] = {
+ DEFINE_PROP_STRING("hart-config", SiFivePLICState, hart_config),
+ DEFINE_PROP_UINT32("hartid-base", SiFivePLICState, hartid_base, 0),
+ DEFINE_PROP_UINT32("num-sources", SiFivePLICState, num_sources, 0),
+ DEFINE_PROP_UINT32("num-priorities", SiFivePLICState, num_priorities, 0),
+ DEFINE_PROP_UINT32("priority-base", SiFivePLICState, priority_base, 0),
+ DEFINE_PROP_UINT32("pending-base", SiFivePLICState, pending_base, 0),
+ DEFINE_PROP_UINT32("enable-base", SiFivePLICState, enable_base, 0),
+ DEFINE_PROP_UINT32("enable-stride", SiFivePLICState, enable_stride, 0),
+ DEFINE_PROP_UINT32("context-base", SiFivePLICState, context_base, 0),
+ DEFINE_PROP_UINT32("context-stride", SiFivePLICState, context_stride, 0),
+ DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void sifive_plic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, sifive_plic_properties);
+ dc->realize = sifive_plic_realize;
+ dc->vmsd = &vmstate_sifive_plic;
+}
+
+static const TypeInfo sifive_plic_info = {
+ .name = TYPE_SIFIVE_PLIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SiFivePLICState),
+ .class_init = sifive_plic_class_init,
+};
+
+static void sifive_plic_register_types(void)
+{
+ type_register_static(&sifive_plic_info);
+}
+
+type_init(sifive_plic_register_types)
+
+/*
+ * Create PLIC device.
+ */
+DeviceState *sifive_plic_create(hwaddr addr, char *hart_config,
+ uint32_t num_harts,
+ uint32_t hartid_base, uint32_t num_sources,
+ uint32_t num_priorities, uint32_t priority_base,
+ uint32_t pending_base, uint32_t enable_base,
+ uint32_t enable_stride, uint32_t context_base,
+ uint32_t context_stride, uint32_t aperture_size)
+{
+ DeviceState *dev = qdev_new(TYPE_SIFIVE_PLIC);
+ int i;
+
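+    /*
+     * The strides are used as address masks in the MMIO handlers and must
+     * therefore be powers of two.
+     */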
+ assert(enable_stride == (enable_stride & -enable_stride));
+ assert(context_stride == (context_stride & -context_stride));
+ qdev_prop_set_string(dev, "hart-config", hart_config);
+ qdev_prop_set_uint32(dev, "hartid-base", hartid_base);
+ qdev_prop_set_uint32(dev, "num-sources", num_sources);
+ qdev_prop_set_uint32(dev, "num-priorities", num_priorities);
+ qdev_prop_set_uint32(dev, "priority-base", priority_base);
+ qdev_prop_set_uint32(dev, "pending-base", pending_base);
+ qdev_prop_set_uint32(dev, "enable-base", enable_base);
+ qdev_prop_set_uint32(dev, "enable-stride", enable_stride);
+ qdev_prop_set_uint32(dev, "context-base", context_base);
+ qdev_prop_set_uint32(dev, "context-stride", context_stride);
+ qdev_prop_set_uint32(dev, "aperture-size", aperture_size);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
+
+ for (i = 0; i < num_harts; i++) {
+ CPUState *cpu = qemu_get_cpu(hartid_base + i);
+
+ qdev_connect_gpio_out(dev, i,
+ qdev_get_gpio_in(DEVICE(cpu), IRQ_S_EXT));
+ qdev_connect_gpio_out(dev, num_harts + i,
+ qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT));
+ }
+
+ return dev;
+}
diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c
new file mode 100644
index 000000000..f7e59ba64
--- /dev/null
+++ b/hw/intc/slavio_intctl.c
@@ -0,0 +1,475 @@
+/*
+ * QEMU Sparc SLAVIO interrupt controller emulation
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "qemu/module.h"
+#include "hw/sysbus.h"
+#include "hw/intc/intc.h"
+#include "hw/irq.h"
+#include "trace.h"
+#include "qom/object.h"
+
+//#define DEBUG_IRQ_COUNT
+
+/*
+ * Registers of interrupt controller in sun4m.
+ *
+ * This is the interrupt controller part of chip STP2001 (Slave I/O), also
+ * produced as NCR89C105. See
+ * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C105.txt
+ *
+ * There is a system master controller and one for each cpu.
+ *
+ */
+
+#define MAX_CPUS 16
+#define MAX_PILS 16
+
+struct SLAVIO_INTCTLState;
+
+typedef struct SLAVIO_CPUINTCTLState {
+ MemoryRegion iomem;
+ struct SLAVIO_INTCTLState *master;
+ uint32_t intreg_pending;
+ uint32_t cpu;
+ uint32_t irl_out;
+} SLAVIO_CPUINTCTLState;
+
+#define TYPE_SLAVIO_INTCTL "slavio_intctl"
+OBJECT_DECLARE_SIMPLE_TYPE(SLAVIO_INTCTLState, SLAVIO_INTCTL)
+
+struct SLAVIO_INTCTLState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+#ifdef DEBUG_IRQ_COUNT
+ uint64_t irq_count[32];
+#endif
+ qemu_irq cpu_irqs[MAX_CPUS][MAX_PILS];
+ SLAVIO_CPUINTCTLState slaves[MAX_CPUS];
+ uint32_t intregm_pending;
+ uint32_t intregm_disabled;
+ uint32_t target_cpu;
+};
+
+#define INTCTL_MAXADDR 0xf
+#define INTCTL_SIZE (INTCTL_MAXADDR + 1)
+#define INTCTLM_SIZE 0x14
+#define MASTER_IRQ_MASK ~0x0fa2007f
+#define MASTER_DISABLE 0x80000000
+#define CPU_SOFTIRQ_MASK 0xfffe0000
+#define CPU_IRQ_INT15_IN (1 << 15)
+#define CPU_IRQ_TIMER_IN (1 << 14)
+
+static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs);
+
+// per-cpu interrupt controller
+static uint64_t slavio_intctl_mem_readl(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ SLAVIO_CPUINTCTLState *s = opaque;
+ uint32_t saddr, ret;
+
+ saddr = addr >> 2;
+ switch (saddr) {
+ case 0:
+ ret = s->intreg_pending;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ trace_slavio_intctl_mem_readl(s->cpu, addr, ret);
+
+ return ret;
+}
+
+static void slavio_intctl_mem_writel(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ SLAVIO_CPUINTCTLState *s = opaque;
+ uint32_t saddr;
+
+ saddr = addr >> 2;
+ trace_slavio_intctl_mem_writel(s->cpu, addr, val);
+ switch (saddr) {
+ case 1: // clear pending softints
+ val &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN;
+ s->intreg_pending &= ~val;
+ slavio_check_interrupts(s->master, 1);
+ trace_slavio_intctl_mem_writel_clear(s->cpu, val, s->intreg_pending);
+ break;
+ case 2: // set softint
+ val &= CPU_SOFTIRQ_MASK;
+ s->intreg_pending |= val;
+ slavio_check_interrupts(s->master, 1);
+ trace_slavio_intctl_mem_writel_set(s->cpu, val, s->intreg_pending);
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps slavio_intctl_mem_ops = {
+ .read = slavio_intctl_mem_readl,
+ .write = slavio_intctl_mem_writel,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+// master system interrupt controller
+static uint64_t slavio_intctlm_mem_readl(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ SLAVIO_INTCTLState *s = opaque;
+ uint32_t saddr, ret;
+
+ saddr = addr >> 2;
+ switch (saddr) {
+ case 0:
+ ret = s->intregm_pending & ~MASTER_DISABLE;
+ break;
+ case 1:
+ ret = s->intregm_disabled & MASTER_IRQ_MASK;
+ break;
+ case 4:
+ ret = s->target_cpu;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ trace_slavio_intctlm_mem_readl(addr, ret);
+
+ return ret;
+}
+
+static void slavio_intctlm_mem_writel(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ SLAVIO_INTCTLState *s = opaque;
+ uint32_t saddr;
+
+ saddr = addr >> 2;
+ trace_slavio_intctlm_mem_writel(addr, val);
+ switch (saddr) {
+ case 2: // clear (enable)
+ // Force clear unused bits
+ val &= MASTER_IRQ_MASK;
+ s->intregm_disabled &= ~val;
+ trace_slavio_intctlm_mem_writel_enable(val, s->intregm_disabled);
+ slavio_check_interrupts(s, 1);
+ break;
+ case 3: // set (disable; doesn't affect pending)
+ // Force clear unused bits
+ val &= MASTER_IRQ_MASK;
+ s->intregm_disabled |= val;
+ slavio_check_interrupts(s, 1);
+ trace_slavio_intctlm_mem_writel_disable(val, s->intregm_disabled);
+ break;
+ case 4:
+ s->target_cpu = val & (MAX_CPUS - 1);
+ slavio_check_interrupts(s, 1);
+ trace_slavio_intctlm_mem_writel_target(s->target_cpu);
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps slavio_intctlm_mem_ops = {
+ .read = slavio_intctlm_mem_readl,
+ .write = slavio_intctlm_mem_writel,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
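+/*
+ * Map each bit of the system interrupt register to a CPU interrupt
+ * level (PIL); a value of 0 means the bit has no associated level.
+ */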
+static const uint32_t intbit_to_level[] = {
+ 2, 3, 5, 7, 9, 11, 13, 2, 3, 5, 7, 9, 11, 13, 12, 12,
+ 6, 13, 4, 10, 8, 9, 11, 0, 0, 0, 0, 15, 15, 15, 15, 0,
+};
+
+static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs)
+{
+ uint32_t pending = s->intregm_pending, pil_pending;
+ unsigned int i, j;
+
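+    /* Drop interrupts masked in the master (system) register */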
+ pending &= ~s->intregm_disabled;
+
+ trace_slavio_check_interrupts(pending, s->intregm_disabled);
+ for (i = 0; i < MAX_CPUS; i++) {
+ pil_pending = 0;
+
+ /* If we are the current interrupt target, get hard interrupts */
+ if (pending && !(s->intregm_disabled & MASTER_DISABLE) &&
+ (i == s->target_cpu)) {
+ for (j = 0; j < 32; j++) {
+ if ((pending & (1 << j)) && intbit_to_level[j]) {
+ pil_pending |= 1 << intbit_to_level[j];
+ }
+ }
+ }
+
+ /* Calculate current pending hard interrupts for display */
+ s->slaves[i].intreg_pending &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN |
+ CPU_IRQ_TIMER_IN;
+ if (i == s->target_cpu) {
+ for (j = 0; j < 32; j++) {
+ if ((s->intregm_pending & (1U << j)) && intbit_to_level[j]) {
+ s->slaves[i].intreg_pending |= 1 << intbit_to_level[j];
+ }
+ }
+ }
+
+ /* Level 15 and CPU timer interrupts are only masked when
+ the MASTER_DISABLE bit is set */
+ if (!(s->intregm_disabled & MASTER_DISABLE)) {
+ pil_pending |= s->slaves[i].intreg_pending &
+ (CPU_IRQ_INT15_IN | CPU_IRQ_TIMER_IN);
+ }
+
+ /* Add soft interrupts */
+ pil_pending |= (s->slaves[i].intreg_pending & CPU_SOFTIRQ_MASK) >> 16;
+
+ if (set_irqs) {
+ /* Since there is not really an interrupt 0 (and pil_pending
+ * and irl_out bit zero are thus always zero) there is no need
+ * to do anything with cpu_irqs[i][0] and it is OK not to do
+ * the j=0 iteration of this loop.
+ */
+ for (j = MAX_PILS-1; j > 0; j--) {
+ if (pil_pending & (1 << j)) {
+ if (!(s->slaves[i].irl_out & (1 << j))) {
+ qemu_irq_raise(s->cpu_irqs[i][j]);
+ }
+ } else {
+ if (s->slaves[i].irl_out & (1 << j)) {
+ qemu_irq_lower(s->cpu_irqs[i][j]);
+ }
+ }
+ }
+ }
+ s->slaves[i].irl_out = pil_pending;
+ }
+}
+
+/*
+ * "irq" here is the bit number in the system interrupt register to
+ * separate serial and keyboard interrupts sharing a level.
+ */
+static void slavio_set_irq(void *opaque, int irq, int level)
+{
+ SLAVIO_INTCTLState *s = opaque;
+ uint32_t mask = 1 << irq;
+ uint32_t pil = intbit_to_level[irq];
+ unsigned int i;
+
+ trace_slavio_set_irq(s->target_cpu, irq, pil, level);
+ if (pil > 0) {
+ if (level) {
+#ifdef DEBUG_IRQ_COUNT
+ s->irq_count[pil]++;
+#endif
+ s->intregm_pending |= mask;
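+            /* Level 15 interrupts are broadcast to every CPU */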
+ if (pil == 15) {
+ for (i = 0; i < MAX_CPUS; i++) {
+ s->slaves[i].intreg_pending |= 1 << pil;
+ }
+ }
+ } else {
+ s->intregm_pending &= ~mask;
+ if (pil == 15) {
+ for (i = 0; i < MAX_CPUS; i++) {
+ s->slaves[i].intreg_pending &= ~(1 << pil);
+ }
+ }
+ }
+ slavio_check_interrupts(s, 1);
+ }
+}
+
+static void slavio_set_timer_irq_cpu(void *opaque, int cpu, int level)
+{
+ SLAVIO_INTCTLState *s = opaque;
+
+ trace_slavio_set_timer_irq_cpu(cpu, level);
+
+ if (level) {
+ s->slaves[cpu].intreg_pending |= CPU_IRQ_TIMER_IN;
+ } else {
+ s->slaves[cpu].intreg_pending &= ~CPU_IRQ_TIMER_IN;
+ }
+
+ slavio_check_interrupts(s, 1);
+}
+
+static void slavio_set_irq_all(void *opaque, int irq, int level)
+{
+ if (irq < 32) {
+ slavio_set_irq(opaque, irq, level);
+ } else {
+ slavio_set_timer_irq_cpu(opaque, irq - 32, level);
+ }
+}
+
+static int vmstate_intctl_post_load(void *opaque, int version_id)
+{
+ SLAVIO_INTCTLState *s = opaque;
+
+ slavio_check_interrupts(s, 0);
+ return 0;
+}
+
+static const VMStateDescription vmstate_intctl_cpu = {
+ .name ="slavio_intctl_cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(intreg_pending, SLAVIO_CPUINTCTLState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_intctl = {
+ .name ="slavio_intctl",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = vmstate_intctl_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(slaves, SLAVIO_INTCTLState, MAX_CPUS, 1,
+ vmstate_intctl_cpu, SLAVIO_CPUINTCTLState),
+ VMSTATE_UINT32(intregm_pending, SLAVIO_INTCTLState),
+ VMSTATE_UINT32(intregm_disabled, SLAVIO_INTCTLState),
+ VMSTATE_UINT32(target_cpu, SLAVIO_INTCTLState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void slavio_intctl_reset(DeviceState *d)
+{
+ SLAVIO_INTCTLState *s = SLAVIO_INTCTL(d);
+ int i;
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ s->slaves[i].intreg_pending = 0;
+ s->slaves[i].irl_out = 0;
+ }
+ s->intregm_disabled = ~MASTER_IRQ_MASK;
+ s->intregm_pending = 0;
+ s->target_cpu = 0;
+ slavio_check_interrupts(s, 0);
+}
+
+#ifdef DEBUG_IRQ_COUNT
+static bool slavio_intctl_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts,
+ unsigned int *nb_irqs)
+{
+ SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj);
+ *irq_counts = s->irq_count;
+ *nb_irqs = ARRAY_SIZE(s->irq_count);
+ return true;
+}
+#endif
+
+static void slavio_intctl_print_info(InterruptStatsProvider *obj, Monitor *mon)
+{
+ SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj);
+ int i;
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ monitor_printf(mon, "per-cpu %d: pending 0x%08x\n", i,
+ s->slaves[i].intreg_pending);
+ }
+ monitor_printf(mon, "master: pending 0x%08x, disabled 0x%08x\n",
+ s->intregm_pending, s->intregm_disabled);
+}
+
+static void slavio_intctl_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ unsigned int i, j;
+ char slave_name[45];
+
+ qdev_init_gpio_in(dev, slavio_set_irq_all, 32 + MAX_CPUS);
+ memory_region_init_io(&s->iomem, obj, &slavio_intctlm_mem_ops, s,
+ "master-interrupt-controller", INTCTLM_SIZE);
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ snprintf(slave_name, sizeof(slave_name),
+ "slave-interrupt-controller-%i", i);
+ for (j = 0; j < MAX_PILS; j++) {
+ sysbus_init_irq(sbd, &s->cpu_irqs[i][j]);
+ }
+ memory_region_init_io(&s->slaves[i].iomem, OBJECT(s),
+ &slavio_intctl_mem_ops,
+ &s->slaves[i], slave_name, INTCTL_SIZE);
+ sysbus_init_mmio(sbd, &s->slaves[i].iomem);
+ s->slaves[i].cpu = i;
+ s->slaves[i].master = s;
+ }
+}
+
+static void slavio_intctl_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
+
+ dc->reset = slavio_intctl_reset;
+ dc->vmsd = &vmstate_intctl;
+#ifdef DEBUG_IRQ_COUNT
+ ic->get_statistics = slavio_intctl_get_statistics;
+#endif
+ ic->print_info = slavio_intctl_print_info;
+}
+
+static const TypeInfo slavio_intctl_info = {
+ .name = TYPE_SLAVIO_INTCTL,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SLAVIO_INTCTLState),
+ .instance_init = slavio_intctl_init,
+ .class_init = slavio_intctl_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
+};
+
+static void slavio_intctl_register_types(void)
+{
+ type_register_static(&slavio_intctl_info);
+}
+
+type_init(slavio_intctl_register_types)
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
new file mode 100644
index 000000000..4ec659b93
--- /dev/null
+++ b/hw/intc/spapr_xive.c
@@ -0,0 +1,1830 @@
+/*
+ * QEMU PowerPC sPAPR XIVE interrupt controller model
+ *
+ * Copyright (c) 2017-2018, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/reset.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/spapr_xive.h"
+#include "hw/ppc/xive.h"
+#include "hw/ppc/xive_regs.h"
+#include "hw/qdev-properties.h"
+#include "trace.h"
+
+/*
+ * XIVE Virtualization Controller BAR and Thread Management BAR that we
+ * use for the ESB pages and the TIMA pages
+ */
+#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
+#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
+
+/*
+ * The allocation of VP blocks is a complex operation in OPAL and the
+ * VP identifiers have a relation with the number of HW chips, the
+ * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
+ * controller model does not have the same constraints and can use a
+ * simple mapping scheme of the CPU vcpu_id
+ *
+ * These identifiers are never returned to the OS.
+ */
+
+#define SPAPR_XIVE_NVT_BASE 0x400
+
+/*
+ * sPAPR NVT and END indexing helpers
+ */
+static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
+{
+ return nvt_idx - SPAPR_XIVE_NVT_BASE;
+}
+
+static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
+ uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
+{
+ assert(cpu);
+
+ if (out_nvt_blk) {
+ *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
+ }
+
+    if (out_nvt_idx) {
+ *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
+ }
+}
+
+static int spapr_xive_target_to_nvt(uint32_t target,
+ uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
+{
+ PowerPCCPU *cpu = spapr_find_cpu(target);
+
+ if (!cpu) {
+ return -1;
+ }
+
+ spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
+ return 0;
+}
+
+/*
+ * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
+ * priorities per CPU
+ */
+int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
+ uint32_t *out_server, uint8_t *out_prio)
+{
+
+ assert(end_blk == SPAPR_XIVE_BLOCK_ID);
+
+ if (out_server) {
+ *out_server = end_idx >> 3;
+ }
+
+ if (out_prio) {
+ *out_prio = end_idx & 0x7;
+ }
+ return 0;
+}
+
+static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
+ uint8_t *out_end_blk, uint32_t *out_end_idx)
+{
+ assert(cpu);
+
+ if (out_end_blk) {
+ *out_end_blk = SPAPR_XIVE_BLOCK_ID;
+ }
+
+ if (out_end_idx) {
+ *out_end_idx = (cpu->vcpu_id << 3) + prio;
+ }
+}
+
+static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
+ uint8_t *out_end_blk, uint32_t *out_end_idx)
+{
+ PowerPCCPU *cpu = spapr_find_cpu(target);
+
+ if (!cpu) {
+ return -1;
+ }
+
+ spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
+ return 0;
+}
+
+/*
+ * On sPAPR machines, use a simplified output for the XIVE END
+ * structure dumping only the information related to the OS EQ.
+ */
+static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
+ Monitor *mon)
+{
+ uint64_t qaddr_base = xive_end_qaddr(end);
+ uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+ uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
+ uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
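+    /*
+     * EQ entries are 4 bytes, so a queue of 2^(qsize + 12) bytes holds
+     * 2^(qsize + 10) entries.
+     */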
+ uint32_t qentries = 1 << (qsize + 10);
+ uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
+ uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
+
+ monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
+ spapr_xive_nvt_to_target(0, nvt),
+ priority, qindex, qentries, qaddr_base, qgen);
+
+ xive_end_queue_pic_print_info(end, 6, mon);
+}
+
+/*
+ * kvm_irqchip_in_kernel() will cause the compiler to turn this
+ * into a nop if CONFIG_KVM isn't defined.
+ */
+#define spapr_xive_in_kernel(xive) \
+ (kvm_irqchip_in_kernel() && (xive)->fd != -1)
+
+static void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
+{
+ XiveSource *xsrc = &xive->source;
+ int i;
+
+ if (spapr_xive_in_kernel(xive)) {
+ Error *local_err = NULL;
+
+ kvmppc_xive_synchronize_state(xive, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ return;
+ }
+ }
+
+ monitor_printf(mon, " LISN PQ EISN CPU/PRIO EQ\n");
+
+ for (i = 0; i < xive->nr_irqs; i++) {
+ uint8_t pq = xive_source_esb_get(xsrc, i);
+ XiveEAS *eas = &xive->eat[i];
+
+ if (!xive_eas_is_valid(eas)) {
+ continue;
+ }
+
+ monitor_printf(mon, " %08x %s %c%c%c %s %08x ", i,
+ xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
+ pq & XIVE_ESB_VAL_P ? 'P' : '-',
+ pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
+ xive_eas_is_masked(eas) ? "M" : " ",
+ (int) xive_get_field64(EAS_END_DATA, eas->w));
+
+ if (!xive_eas_is_masked(eas)) {
+ uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
+ XiveEND *end;
+
+ assert(end_idx < xive->nr_ends);
+ end = &xive->endt[end_idx];
+
+ if (xive_end_is_valid(end)) {
+ spapr_xive_end_pic_print_info(xive, end, mon);
+ }
+ }
+ monitor_printf(mon, "\n");
+ }
+}
+
+void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
+{
+ memory_region_set_enabled(&xive->source.esb_mmio, enable);
+ memory_region_set_enabled(&xive->tm_mmio, enable);
+
+ /* Disable the END ESBs until a guest OS makes use of them */
+ memory_region_set_enabled(&xive->end_source.esb_mmio, false);
+}
+
+static void spapr_xive_tm_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
+
+ xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
+}
+
+static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
+{
+ XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
+
+ return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
+}
+
+const MemoryRegionOps spapr_xive_tm_ops = {
+ .read = spapr_xive_tm_read,
+ .write = spapr_xive_tm_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+static void spapr_xive_end_reset(XiveEND *end)
+{
+ memset(end, 0, sizeof(*end));
+
+ /* switch off the escalation and notification ESBs */
+ end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
+}
+
+static void spapr_xive_reset(void *dev)
+{
+ SpaprXive *xive = SPAPR_XIVE(dev);
+ int i;
+
+ /*
+     * The XiveSource has its own reset handler, which masks off all
+ * IRQs (!P|Q)
+ */
+
+ /* Mask all valid EASs in the IRQ number space. */
+ for (i = 0; i < xive->nr_irqs; i++) {
+ XiveEAS *eas = &xive->eat[i];
+ if (xive_eas_is_valid(eas)) {
+ eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
+ } else {
+ eas->w = 0;
+ }
+ }
+
+ /* Clear all ENDs */
+ for (i = 0; i < xive->nr_ends; i++) {
+ spapr_xive_end_reset(&xive->endt[i]);
+ }
+}
+
+static void spapr_xive_instance_init(Object *obj)
+{
+ SpaprXive *xive = SPAPR_XIVE(obj);
+
+ object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);
+
+ object_initialize_child(obj, "end_source", &xive->end_source,
+ TYPE_XIVE_END_SOURCE);
+
+ /* Not connected to the KVM XIVE device */
+ xive->fd = -1;
+}
+
+static void spapr_xive_realize(DeviceState *dev, Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(dev);
+ SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
+ XiveSource *xsrc = &xive->source;
+ XiveENDSource *end_xsrc = &xive->end_source;
+ Error *local_err = NULL;
+
+ /* Set by spapr_irq_init() */
+ g_assert(xive->nr_irqs);
+ g_assert(xive->nr_ends);
+
+ sxc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /*
+ * Initialize the internal sources, for IPIs and virtual devices.
+ */
+ object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
+ &error_fatal);
+ object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
+ if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
+ return;
+ }
+ sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
+
+ /*
+ * Initialize the END ESB source
+ */
+ object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
+ &error_fatal);
+ object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
+ &error_abort);
+ if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
+ return;
+ }
+ sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
+
+ /* Set the mapping address of the END ESB pages after the source ESBs */
+ xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);
+
+ /*
+ * Allocate the routing tables
+ */
+ xive->eat = g_new0(XiveEAS, xive->nr_irqs);
+ xive->endt = g_new0(XiveEND, xive->nr_ends);
+
+ xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
+ xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
+
+ qemu_register_reset(spapr_xive_reset, dev);
+
+ /* TIMA initialization */
+ memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
+ xive, "xive.tima", 4ull << TM_SHIFT);
+ sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
+
+ /*
+ * Map all regions. These will be enabled or disabled at reset and
+ * can also be overridden by KVM memory regions if active
+ */
+ sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
+ sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
+ sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
+}
+
+static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
+ uint32_t eas_idx, XiveEAS *eas)
+{
+ SpaprXive *xive = SPAPR_XIVE(xrtr);
+
+ if (eas_idx >= xive->nr_irqs) {
+ return -1;
+ }
+
+ *eas = xive->eat[eas_idx];
+ return 0;
+}
+
+static int spapr_xive_get_end(XiveRouter *xrtr,
+ uint8_t end_blk, uint32_t end_idx, XiveEND *end)
+{
+ SpaprXive *xive = SPAPR_XIVE(xrtr);
+
+ if (end_idx >= xive->nr_ends) {
+ return -1;
+ }
+
+ memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
+ return 0;
+}
+
+static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
+ uint32_t end_idx, XiveEND *end,
+ uint8_t word_number)
+{
+ SpaprXive *xive = SPAPR_XIVE(xrtr);
+
+ if (end_idx >= xive->nr_ends) {
+ return -1;
+ }
+
+ memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
+ return 0;
+}
+
+static int spapr_xive_get_nvt(XiveRouter *xrtr,
+ uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
+{
+ uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
+ PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
+
+ if (!cpu) {
+        /* TODO: should we assert() if we can't find an NVT ? */
+ return -1;
+ }
+
+ /*
+ * sPAPR does not maintain a NVT table. Return that the NVT is
+ * valid if we have found a matching CPU
+ */
+ nvt->w0 = cpu_to_be32(NVT_W0_VALID);
+ return 0;
+}
+
+static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
+ uint32_t nvt_idx, XiveNVT *nvt,
+ uint8_t word_number)
+{
+ /*
+ * We don't need to write back to the NVTs because the sPAPR
+     * machine should never hit a non-scheduled NVT, so this callback
+     * should never be called.
+ */
+ g_assert_not_reached();
+}
+
+static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, XiveTCTXMatch *match)
+{
+ CPUState *cs;
+ int count = 0;
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
+ int ring;
+
+ /*
+ * Skip partially initialized vCPUs. This can happen when
+ * vCPUs are hotplugged.
+ */
+ if (!tctx) {
+ continue;
+ }
+
+ /*
+ * Check the thread context CAM lines and record matches.
+ */
+ ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
+ cam_ignore, logic_serv);
+ /*
+ * Save the matching thread interrupt context and follow on to
+ * check for duplicates which are invalid.
+ */
+ if (ring != -1) {
+ if (match->tctx) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
+ "context NVT %x/%x\n", nvt_blk, nvt_idx);
+ return -1;
+ }
+
+ match->ring = ring;
+ match->tctx = tctx;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
+{
+ return SPAPR_XIVE_BLOCK_ID;
+}
+
+static const VMStateDescription vmstate_spapr_xive_end = {
+ .name = TYPE_SPAPR_XIVE "/end",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(w0, XiveEND),
+ VMSTATE_UINT32(w1, XiveEND),
+ VMSTATE_UINT32(w2, XiveEND),
+ VMSTATE_UINT32(w3, XiveEND),
+ VMSTATE_UINT32(w4, XiveEND),
+ VMSTATE_UINT32(w5, XiveEND),
+ VMSTATE_UINT32(w6, XiveEND),
+ VMSTATE_UINT32(w7, XiveEND),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_spapr_xive_eas = {
+ .name = TYPE_SPAPR_XIVE "/eas",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT64(w, XiveEAS),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static int vmstate_spapr_xive_pre_save(void *opaque)
+{
+ SpaprXive *xive = SPAPR_XIVE(opaque);
+
+ if (spapr_xive_in_kernel(xive)) {
+ return kvmppc_xive_pre_save(xive);
+ }
+
+ return 0;
+}
+
+/*
+ * Called by the sPAPR IRQ backend 'post_load' method at the machine
+ * level.
+ */
+static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+
+ if (spapr_xive_in_kernel(xive)) {
+ return kvmppc_xive_post_load(xive, version_id);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_spapr_xive = {
+ .name = TYPE_SPAPR_XIVE,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = vmstate_spapr_xive_pre_save,
+ .post_load = NULL, /* handled at the machine level */
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
+ vmstate_spapr_xive_eas, XiveEAS),
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
+ vmstate_spapr_xive_end, XiveEND),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
+ bool lsi, Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+ XiveSource *xsrc = &xive->source;
+
+ assert(lisn < xive->nr_irqs);
+
+ trace_spapr_xive_claim_irq(lisn, lsi);
+
+ if (xive_eas_is_valid(&xive->eat[lisn])) {
+ error_setg(errp, "IRQ %d is not free", lisn);
+ return -EBUSY;
+ }
+
+ /*
+ * Set default values when allocating an IRQ number
+ */
+ xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
+ if (lsi) {
+ xive_source_irq_set_lsi(xsrc, lisn);
+ }
+
+ if (spapr_xive_in_kernel(xive)) {
+ return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
+ }
+
+ return 0;
+}
+
+static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+ assert(lisn < xive->nr_irqs);
+
+ trace_spapr_xive_free_irq(lisn);
+
+ xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
+}
+
+static Property spapr_xive_properties[] = {
+ DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
+ DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
+ DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
+ DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
+ DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
+ PowerPCCPU *cpu, Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+ Object *obj;
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+
+ obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
+ if (!obj) {
+ return -1;
+ }
+
+ spapr_cpu->tctx = XIVE_TCTX(obj);
+ return 0;
+}
+
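+/*
+ * Push the OS CAM line, with the VO (valid) bit set, into word 2 of the
+ * OS ring of the thread interrupt context registers.
+ */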
+static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
+{
+ uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
+ memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
+}
+
+static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
+ PowerPCCPU *cpu)
+{
+ XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
+ uint8_t nvt_blk;
+ uint32_t nvt_idx;
+
+ xive_tctx_reset(tctx);
+
+ /*
+ * When a Virtual Processor is scheduled to run on a HW thread,
+ * the hypervisor pushes its identifier in the OS CAM line.
+ * Emulate the same behavior under QEMU.
+ */
+ spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);
+
+ xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
+}
+
+static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
+ PowerPCCPU *cpu)
+{
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+
+ xive_tctx_destroy(spapr_cpu->tctx);
+ spapr_cpu->tctx = NULL;
+}
+
+static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+
+ trace_spapr_xive_set_irq(irq, val);
+
+ if (spapr_xive_in_kernel(xive)) {
+ kvmppc_xive_source_set_irq(&xive->source, irq, val);
+ } else {
+ xive_source_set_irq(&xive->source, irq, val);
+ }
+}
+
+static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
+ }
+
+ spapr_xive_pic_print_info(xive, mon);
+}
+
+static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
+ void *fdt, uint32_t phandle)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+ int node;
+ uint64_t timas[2 * 2];
+ /* Interrupt number ranges for the IPIs */
+ uint32_t lisn_ranges[] = {
+ cpu_to_be32(SPAPR_IRQ_IPI),
+ cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
+ };
+ /*
+     * EQ size - the sizes of pages supported by the system: 4K, 64K,
+ * 2M, 16M. We only advertise 64K for the moment.
+ */
+ uint32_t eq_sizes[] = {
+ cpu_to_be32(16), /* 64K */
+ };
+ /*
+ * QEMU/KVM only needs to define a single range to reserve the
+ * escalation priority. A priority bitmask would have been more
+ * appropriate.
+ */
+ uint32_t plat_res_int_priorities[] = {
+ cpu_to_be32(xive->hv_prio), /* start */
+ cpu_to_be32(0xff - xive->hv_prio), /* count */
+ };
+
+ /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
+ timas[0] = cpu_to_be64(xive->tm_base +
+ XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
+ timas[1] = cpu_to_be64(1ull << TM_SHIFT);
+ timas[2] = cpu_to_be64(xive->tm_base +
+ XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
+ timas[3] = cpu_to_be64(1ull << TM_SHIFT);
+
+ _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));
+
+ _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
+ _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));
+
+ _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
+ _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
+ sizeof(eq_sizes)));
+ _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
+ sizeof(lisn_ranges)));
+
+ /* For Linux to link the LSIs to the interrupt controller. */
+ _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
+ _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
+
+ /* For SLOF */
+ _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
+ _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
+
+ /*
+ * The "ibm,plat-res-int-priorities" property defines the priority
+ * ranges reserved by the hypervisor
+ */
+ _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
+ plat_res_int_priorities, sizeof(plat_res_int_priorities)));
+}
+
+static int spapr_xive_activate(SpaprInterruptController *intc,
+ uint32_t nr_servers, Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+
+ if (kvm_enabled()) {
+ int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
+ errp);
+ if (rc < 0) {
+ return rc;
+ }
+ }
+
+ /* Activate the XIVE MMIOs */
+ spapr_xive_mmio_set_enabled(xive, true);
+
+ return 0;
+}
+
+static void spapr_xive_deactivate(SpaprInterruptController *intc)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+
+ spapr_xive_mmio_set_enabled(xive, false);
+
+ if (spapr_xive_in_kernel(xive)) {
+ kvmppc_xive_disconnect(intc);
+ }
+}
+
+static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
+{
+ return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
+}
+
+static void spapr_xive_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
+ SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
+ XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
+ SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);
+
+ dc->desc = "sPAPR XIVE Interrupt Controller";
+ device_class_set_props(dc, spapr_xive_properties);
+ device_class_set_parent_realize(dc, spapr_xive_realize,
+ &sxc->parent_realize);
+ dc->vmsd = &vmstate_spapr_xive;
+
+ xrc->get_eas = spapr_xive_get_eas;
+ xrc->get_end = spapr_xive_get_end;
+ xrc->write_end = spapr_xive_write_end;
+ xrc->get_nvt = spapr_xive_get_nvt;
+ xrc->write_nvt = spapr_xive_write_nvt;
+ xrc->get_block_id = spapr_xive_get_block_id;
+
+ sicc->activate = spapr_xive_activate;
+ sicc->deactivate = spapr_xive_deactivate;
+ sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
+ sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
+ sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
+ sicc->claim_irq = spapr_xive_claim_irq;
+ sicc->free_irq = spapr_xive_free_irq;
+ sicc->set_irq = spapr_xive_set_irq;
+ sicc->print_info = spapr_xive_print_info;
+ sicc->dt = spapr_xive_dt;
+ sicc->post_load = spapr_xive_post_load;
+
+ xpc->match_nvt = spapr_xive_match_nvt;
+ xpc->in_kernel = spapr_xive_in_kernel_xptr;
+}
+
+static const TypeInfo spapr_xive_info = {
+ .name = TYPE_SPAPR_XIVE,
+ .parent = TYPE_XIVE_ROUTER,
+ .instance_init = spapr_xive_instance_init,
+ .instance_size = sizeof(SpaprXive),
+ .class_init = spapr_xive_class_init,
+ .class_size = sizeof(SpaprXiveClass),
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_SPAPR_INTC },
+ { }
+ },
+};
+
+static void spapr_xive_register_types(void)
+{
+ type_register_static(&spapr_xive_info);
+}
+
+type_init(spapr_xive_register_types)
+
+/*
+ * XIVE hcalls
+ *
+ * The terminology used by the XIVE hcalls is the following:
+ *
+ * TARGET vCPU number
+ * EQ Event Queue assigned by OS to receive event data
+ * ESB page for source interrupt management
+ * LISN Logical Interrupt Source Number identifying a source in the
+ * machine
+ * EISN Effective Interrupt Source Number used by guest OS to
+ * identify source in the guest
+ *
+ * The EAS, END, NVT structures are not exposed.
+ */
+
+/*
+ * On POWER9, the KVM XIVE device uses priority 7 for the escalation
+ * interrupts. So we only allow the guest to use priorities [0..6].
+ */
+static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
+{
+ return priority >= xive->hv_prio;
+}
+
+/*
+ * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
+ * real address of the MMIO page through which the Event State Buffer
+ * entry associated with the value of the "lisn" parameter is managed.
+ *
+ * Parameters:
+ * Input
+ * - R4: "flags"
+ * Bits 0-63 reserved
+ * - R5: "lisn" is per "interrupts", "interrupt-map", or
+ * "ibm,xive-lisn-ranges" properties, or as returned by the
+ * ibm,query-interrupt-source-number RTAS call, or as returned
+ * by the H_ALLOCATE_VAS_WINDOW hcall
+ *
+ * Output
+ * - R4: "flags"
+ * Bits 0-59: Reserved
+ * Bit 60: H_INT_ESB must be used for Event State Buffer
+ * management
+ * Bit 61: 1 == LSI 0 == MSI
+ * Bit 62: the full function page supports trigger
+ * Bit 63: Store EOI Supported
+ * - R5: Logical Real address of full function Event State Buffer
+ * management page, -1 if H_INT_ESB hcall flag is set to 1.
+ * - R6: Logical Real Address of trigger only Event State Buffer
+ * management page or -1.
+ * - R7: Power of 2 page size for the ESB management pages returned in
+ * R5 and R6.
+ */
+
+#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60) /* ESB manage with H_INT_ESB */
+#define SPAPR_XIVE_SRC_LSI PPC_BIT(61) /* Virtual LSI type */
+#define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62) /* Trigger and management
+ on same page */
+#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63) /* Store EOI support */
+
+static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ XiveSource *xsrc = &xive->source;
+ target_ulong flags = args[0];
+ target_ulong lisn = args[1];
+
+ trace_spapr_xive_get_source_info(flags, lisn);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags) {
+ return H_PARAMETER;
+ }
+
+ if (lisn >= xive->nr_irqs) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ if (!xive_eas_is_valid(&xive->eat[lisn])) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ /*
+ * All sources are emulated under the main XIVE object and share
+ * the same characteristics.
+ */
+ args[0] = 0;
+ if (!xive_source_esb_has_2page(xsrc)) {
+ args[0] |= SPAPR_XIVE_SRC_TRIGGER;
+ }
+ if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
+ args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
+ }
+
+ /*
+ * Force the use of the H_INT_ESB hcall in case of an LSI
+ * interrupt. This is necessary under KVM to re-trigger the
+ * interrupt if the level is still asserted
+ */
+ if (xive_source_irq_is_lsi(xsrc, lisn)) {
+ args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
+ }
+
+ if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
+ args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
+ } else {
+ args[1] = -1;
+ }
+
+ if (xive_source_esb_has_2page(xsrc) &&
+ !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
+ args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
+ } else {
+ args[2] = -1;
+ }
+
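+    /*
+     * With a 2-page ESB, the page size reported to the guest is half of
+     * the ESB entry (esb_shift - 1): one page for triggers and one for
+     * management.
+     */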
+ if (xive_source_esb_has_2page(xsrc)) {
+ args[3] = xsrc->esb_shift - 1;
+ } else {
+ args[3] = xsrc->esb_shift;
+ }
+
+ return H_SUCCESS;
+}
+
+/*
+ * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
+ * Interrupt Source to a target. The Logical Interrupt Source is
+ * designated with the "lisn" parameter and the target is designated
+ * with the "target" and "priority" parameters. Upon return from the
+ * hcall(), no additional interrupts will be directed to the old EQ.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-61: Reserved
+ * Bit 62: set the "eisn" in the EAS
+ * Bit 63: masks the interrupt source in the hardware interrupt
+ * control structure. An interrupt masked by this mechanism will
+ *            be dropped, but its source state bits will still be
+ *            set. There is no race-free way of unmasking and restoring the
+ *            source. Thus this should only be used for interrupts that are
+ *            also masked at the source, and only in cases where the
+ *            interrupt is not meant to be used for a long time, for
+ *            example because no valid target exists for it
+ * - R5: "lisn" is per "interrupts", "interrupt-map", or
+ * "ibm,xive-lisn-ranges" properties, or as returned by the
+ * ibm,query-interrupt-source-number RTAS call, or as returned by
+ * the H_ALLOCATE_VAS_WINDOW hcall
+ * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
+ * "ibm,ppc-interrupt-gserver#s"
+ * - R7: "priority" is a valid priority not in
+ * "ibm,plat-res-int-priorities"
+ * - R8: "eisn" is the guest EISN associated with the "lisn"
+ *
+ * Output:
+ * - None
+ */
+
+#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
+#define SPAPR_XIVE_SRC_MASK PPC_BIT(63)
+
+static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ XiveEAS eas, new_eas;
+ target_ulong flags = args[0];
+ target_ulong lisn = args[1];
+ target_ulong target = args[2];
+ target_ulong priority = args[3];
+ target_ulong eisn = args[4];
+ uint8_t end_blk;
+ uint32_t end_idx;
+
+ trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
+ return H_PARAMETER;
+ }
+
+ if (lisn >= xive->nr_irqs) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ eas = xive->eat[lisn];
+ if (!xive_eas_is_valid(&eas)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ /* priority 0xff is used to reset the EAS */
+ if (priority == 0xff) {
+ new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
+ goto out;
+ }
+
+ if (flags & SPAPR_XIVE_SRC_MASK) {
+ new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
+ } else {
+ new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
+ }
+
+ if (spapr_xive_priority_is_reserved(xive, priority)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
+ " is reserved\n", priority);
+ return H_P4;
+ }
+
+ /*
+ * Validate that "target" is part of the list of threads allocated
+ * to the partition. For that, find the END corresponding to the
+ * target.
+ */
+ if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
+ return H_P3;
+ }
+
+ new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
+ new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);
+
+ if (flags & SPAPR_XIVE_SRC_SET_EISN) {
+ new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
+ }
+
+ if (spapr_xive_in_kernel(xive)) {
+ Error *local_err = NULL;
+
+ kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ return H_HARDWARE;
+ }
+ }
+
+out:
+ xive->eat[lisn] = new_eas;
+ return H_SUCCESS;
+}
+
+/*
+ * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine to which
+ * target/priority pair is assigned to the specified Logical Interrupt
+ * Source.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-63 Reserved
+ * - R5: "lisn" is per "interrupts", "interrupt-map", or
+ * "ibm,xive-lisn-ranges" properties, or as returned by the
+ * ibm,query-interrupt-source-number RTAS call, or as
+ * returned by the H_ALLOCATE_VAS_WINDOW hcall
+ *
+ * Output:
+ * - R4: Target to which the specified Logical Interrupt Source is
+ * assigned
+ * - R5: Priority to which the specified Logical Interrupt Source is
+ * assigned
+ * - R6: EISN for the specified Logical Interrupt Source (this will be
+ * equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
+ */
+static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ target_ulong flags = args[0];
+ target_ulong lisn = args[1];
+ XiveEAS eas;
+ XiveEND *end;
+ uint8_t nvt_blk;
+ uint32_t end_idx, nvt_idx;
+
+ trace_spapr_xive_get_source_config(flags, lisn);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags) {
+ return H_PARAMETER;
+ }
+
+ if (lisn >= xive->nr_irqs) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ eas = xive->eat[lisn];
+ if (!xive_eas_is_valid(&eas)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ /* EAS_END_BLOCK is unused on sPAPR */
+ end_idx = xive_get_field64(EAS_END_INDEX, eas.w);
+
+ assert(end_idx < xive->nr_ends);
+ end = &xive->endt[end_idx];
+
+ nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
+ nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
+ args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
+
+ if (xive_eas_is_masked(&eas)) {
+ args[1] = 0xff;
+ } else {
+ args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
+ }
+
+ args[2] = xive_get_field64(EAS_END_DATA, eas.w);
+
+ return H_SUCCESS;
+}
+
+/*
+ * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
+ * address of the notification management page associated with the
+ * specified target and priority.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-63 Reserved
+ * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
+ * "ibm,ppc-interrupt-gserver#s"
+ * - R6: "priority" is a valid priority not in
+ * "ibm,plat-res-int-priorities"
+ *
+ * Output:
+ * - R4: Logical real address of notification page
+ * - R5: Power of 2 page size of the notification page
+ */
+static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ XiveENDSource *end_xsrc = &xive->end_source;
+ target_ulong flags = args[0];
+ target_ulong target = args[1];
+ target_ulong priority = args[2];
+ XiveEND *end;
+ uint8_t end_blk;
+ uint32_t end_idx;
+
+ trace_spapr_xive_get_queue_info(flags, target, priority);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags) {
+ return H_PARAMETER;
+ }
+
+ /*
+     * H_STATE should be returned if an H_INT_RESET is in progress.
+ * This is not needed when running the emulation under QEMU
+ */
+
+ if (spapr_xive_priority_is_reserved(xive, priority)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
+ " is reserved\n", priority);
+ return H_P3;
+ }
+
+ /*
+ * Validate that "target" is part of the list of threads allocated
+ * to the partition. For that, find the END corresponding to the
+ * target.
+ */
+ if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
+ return H_P2;
+ }
+
+ assert(end_idx < xive->nr_ends);
+ end = &xive->endt[end_idx];
+
+ args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
+ if (xive_end_is_enqueue(end)) {
+ args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
+ } else {
+ args[1] = 0;
+ }
+
+ return H_SUCCESS;
+}
+
+/*
+ * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ for
+ * a given "target" and "priority". It is also used to set the
+ * notification config associated with the EQ. An EQ size of 0 is
+ * used to reset the EQ config for a given target and priority. If
+ * resetting the EQ config, the END associated with the given "target"
+ * and "priority" will be changed to disable queueing.
+ *
+ * Upon return from the hcall(), no additional interrupts will be
+ * directed to the old EQ (if one was set). The old EQ (if one was
+ * set) should be investigated for interrupts that occurred prior to
+ * or during the hcall().
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-62: Reserved
+ * Bit 63: Unconditional Notify (n) per the XIVE spec
+ * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
+ * "ibm,ppc-interrupt-gserver#s"
+ * - R6: "priority" is a valid priority not in
+ * "ibm,plat-res-int-priorities"
+ * - R7: "eventQueue": The logical real address of the start of the EQ
+ * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
+ *
+ * Output:
+ * - None
+ */
+
+#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
+
+static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ target_ulong flags = args[0];
+ target_ulong target = args[1];
+ target_ulong priority = args[2];
+ target_ulong qpage = args[3];
+ target_ulong qsize = args[4];
+ XiveEND end;
+ uint8_t end_blk, nvt_blk;
+ uint32_t end_idx, nvt_idx;
+
+ trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
+ return H_PARAMETER;
+ }
+
+ /*
+ * H_STATE should be returned if a H_INT_RESET is in progress.
+ * This is not needed when running the emulation under QEMU
+ */
+
+ if (spapr_xive_priority_is_reserved(xive, priority)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
+ " is reserved\n", priority);
+ return H_P3;
+ }
+
+ /*
+ * Validate that "target" is part of the list of threads allocated
+ * to the partition. For that, find the END corresponding to the
+ * target.
+ */
+
+ if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
+ return H_P2;
+ }
+
+ assert(end_idx < xive->nr_ends);
+ memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));
+
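+    /*
+     * Only four EQ sizes are accepted: 4K (2^12), 64K (2^16), 2M
+     * (2^21) and 16M (2^24) bytes. A size of 0 resets the EQ and
+     * disables queueing.
+     */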
+ switch (qsize) {
+ case 12:
+ case 16:
+ case 21:
+ case 24:
+ if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
+ " is not naturally aligned with %" HWADDR_PRIx "\n",
+ qpage, (hwaddr)1 << qsize);
+ return H_P4;
+ }
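+        /*
+         * The EQ address is split across END words 2 and 3: w2 holds
+         * the top bits (only 28 are kept) and w3 the low 32 bits.
+         * QSIZE is stored as log2(size) - 12.
+         */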
+ end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
+ end.w3 = cpu_to_be32(qpage & 0xffffffff);
+ end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
+ end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
+ break;
+ case 0:
+ /* reset queue and disable queueing */
+ spapr_xive_end_reset(&end);
+ goto out;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
+ qsize);
+ return H_P5;
+ }
+
+ if (qsize) {
+ hwaddr plen = 1 << qsize;
+ void *eq;
+
+ /*
+ * Validate the guest EQ. We should also check that the queue
+ * has been zeroed by the OS.
+ */
+ eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
+ MEMTXATTRS_UNSPECIFIED);
+ if (plen != 1 << qsize) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
+ HWADDR_PRIx "\n", qpage);
+ return H_P4;
+ }
+ address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
+ }
+
+ /* "target" should have been validated above */
+ if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
+ g_assert_not_reached();
+ }
+
+ /*
+ * Ensure the priority and target are correctly set (they will not
+ * be right after allocation)
+ */
+ end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
+ xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
+ end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);
+
+ if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
+ end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
+ } else {
+ end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
+ }
+
+ /*
+     * The generation bit for the END starts at 1 and the END page
+ * offset counter starts at 0.
+ */
+ end.w1 = cpu_to_be32(END_W1_GENERATION) |
+ xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
+ end.w0 |= cpu_to_be32(END_W0_VALID);
+
+ /*
+ * TODO: issue syncs required to ensure all in-flight interrupts
+ * are complete on the old END
+ */
+
+out:
+ if (spapr_xive_in_kernel(xive)) {
+ Error *local_err = NULL;
+
+ kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ return H_HARDWARE;
+ }
+ }
+
+ /* Update END */
+ memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
+ return H_SUCCESS;
+}
+
+/*
+ * The H_INT_GET_QUEUE_CONFIG hcall() is used to get a EQ for a given
+ * target and priority.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-62: Reserved
+ * Bit 63: Debug: Return debug data
+ * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
+ * "ibm,ppc-interrupt-gserver#s"
+ * - R6: "priority" is a valid priority not in
+ * "ibm,plat-res-int-priorities"
+ *
+ * Output:
+ * - R4: "flags":
+ * Bits 0-61: Reserved
+ * Bit 62: The value of Event Queue Generation Number (g) per
+ * the XIVE spec if "Debug" = 1
+ * Bit 63: The value of Unconditional Notify (n) per the XIVE spec
+ * - R5: The logical real address of the start of the EQ
+ * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
+ * - R7: The value of Event Queue Offset Counter per XIVE spec
+ * if "Debug" = 1, else 0
+ *
+ */
+
+#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)
+
+static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ target_ulong flags = args[0];
+ target_ulong target = args[1];
+ target_ulong priority = args[2];
+ XiveEND *end;
+ uint8_t end_blk;
+ uint32_t end_idx;
+
+ trace_spapr_xive_get_queue_config(flags, target, priority);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags & ~SPAPR_XIVE_END_DEBUG) {
+ return H_PARAMETER;
+ }
+
+ /*
+ * H_STATE should be returned if a H_INT_RESET is in progress.
+ * This is not needed when running the emulation under QEMU
+ */
+
+ if (spapr_xive_priority_is_reserved(xive, priority)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
+ " is reserved\n", priority);
+ return H_P3;
+ }
+
+ /*
+ * Validate that "target" is part of the list of threads allocated
+ * to the partition. For that, find the END corresponding to the
+ * target.
+ */
+ if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
+ return H_P2;
+ }
+
+ assert(end_idx < xive->nr_ends);
+ end = &xive->endt[end_idx];
+
+ args[0] = 0;
+ if (xive_end_is_notify(end)) {
+ args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
+ }
+
+ if (xive_end_is_enqueue(end)) {
+ args[1] = xive_end_qaddr(end);
+ args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
+ } else {
+ args[1] = 0;
+ args[2] = 0;
+ }
+
+ if (spapr_xive_in_kernel(xive)) {
+ Error *local_err = NULL;
+
+ kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ return H_HARDWARE;
+ }
+ }
+
+ /* TODO: do we need any locking on the END ? */
+ if (flags & SPAPR_XIVE_END_DEBUG) {
+ /* Load the event queue generation number into the return flags */
+ args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;
+
+ /* Load R7 with the event queue offset counter */
+ args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+ } else {
+ args[3] = 0;
+ }
+
+ return H_SUCCESS;
+}
+
+/*
+ * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
+ * reporting cache line pair for the calling thread. The reporting
+ * cache lines will contain the OS interrupt context when the OS
+ * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
+ * interrupt. The reporting cache lines can be reset by inputting -1
+ * in "reportingLine". Issuing the CI store byte without reporting
+ * cache lines registered will result in the data not being accessible
+ * to the OS.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-63: Reserved
+ * - R5: "reportingLine": The logical real address of the reporting cache
+ * line pair
+ *
+ * Output:
+ * - None
+ */
+static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ target_ulong flags = args[0];
+
+ trace_spapr_xive_set_os_reporting_line(flags);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ /*
+ * H_STATE should be returned if a H_INT_RESET is in progress.
+ * This is not needed when running the emulation under QEMU
+ */
+
+ /* TODO: H_INT_SET_OS_REPORTING_LINE */
+ return H_FUNCTION;
+}
+
+/*
+ * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
+ * real address of the reporting cache line pair set for the input
+ * "target". If no reporting cache line pair has been set, -1 is
+ * returned.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-63: Reserved
+ * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
+ * "ibm,ppc-interrupt-gserver#s"
+ * - R6: "reportingLine": The logical real address of the reporting
+ * cache line pair
+ *
+ * Output:
+ * - R4: The logical real address of the reporting line if set, else -1
+ */
+static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ target_ulong flags = args[0];
+
+ trace_spapr_xive_get_os_reporting_line(flags);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ /*
+ * H_STATE should be returned if a H_INT_RESET is in progress.
+ * This is not needed when running the emulation under QEMU
+ */
+
+ /* TODO: H_INT_GET_OS_REPORTING_LINE */
+ return H_FUNCTION;
+}
+
+/*
+ * The H_INT_ESB hcall() is used to issue a load or store to the ESB
+ * page for the input "lisn". This hcall is only supported for LISNs
+ * that have the ESB hcall flag set to 1 when returned from hcall()
+ * H_INT_GET_SOURCE_INFO.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-62: Reserved
+ *         Bit 63: Store: Store=1, store operation, else load operation
+ * - R5: "lisn" is per "interrupts", "interrupt-map", or
+ * "ibm,xive-lisn-ranges" properties, or as returned by the
+ * ibm,query-interrupt-source-number RTAS call, or as
+ * returned by the H_ALLOCATE_VAS_WINDOW hcall
+ * - R6: "esbOffset" is the offset into the ESB page for the load or
+ * store operation
+ * - R7: "storeData" is the data to write for a store operation
+ *
+ * Output:
+ * - R4: The value of the load if load operation, else -1
+ */
+
+#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)
+
+static target_ulong h_int_esb(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ XiveEAS eas;
+ target_ulong flags = args[0];
+ target_ulong lisn = args[1];
+ target_ulong offset = args[2];
+ target_ulong data = args[3];
+ hwaddr mmio_addr;
+ XiveSource *xsrc = &xive->source;
+
+ trace_spapr_xive_esb(flags, lisn, offset, data);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags & ~SPAPR_XIVE_ESB_STORE) {
+ return H_PARAMETER;
+ }
+
+ if (lisn >= xive->nr_irqs) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ eas = xive->eat[lisn];
+ if (!xive_eas_is_valid(&eas)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ if (offset > (1ull << xsrc->esb_shift)) {
+ return H_P3;
+ }
+
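+    /*
+     * Under KVM, the access goes directly through the ESB pages mapped
+     * from the KVM device. Otherwise, emulate it with an 8-byte DMA
+     * access to the management page of the source in the VC region.
+     */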
+ if (spapr_xive_in_kernel(xive)) {
+ args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
+ flags & SPAPR_XIVE_ESB_STORE);
+ } else {
+ mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;
+
+ if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
+ (flags & SPAPR_XIVE_ESB_STORE))) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
+ HWADDR_PRIx "\n", mmio_addr);
+ return H_HARDWARE;
+ }
+ args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
+ }
+ return H_SUCCESS;
+}
+
+/*
+ * The H_INT_SYNC hcall() is used to issue hardware syncs that will
+ * ensure any in-flight events for the input lisn are in the event
+ * queue.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-63: Reserved
+ * - R5: "lisn" is per "interrupts", "interrupt-map", or
+ * "ibm,xive-lisn-ranges" properties, or as returned by the
+ * ibm,query-interrupt-source-number RTAS call, or as
+ * returned by the H_ALLOCATE_VAS_WINDOW hcall
+ *
+ * Output:
+ * - None
+ */
+static target_ulong h_int_sync(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ XiveEAS eas;
+ target_ulong flags = args[0];
+ target_ulong lisn = args[1];
+
+ trace_spapr_xive_sync(flags, lisn);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags) {
+ return H_PARAMETER;
+ }
+
+ if (lisn >= xive->nr_irqs) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ eas = xive->eat[lisn];
+ if (!xive_eas_is_valid(&eas)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
+ lisn);
+ return H_P2;
+ }
+
+ /*
+ * H_STATE should be returned if a H_INT_RESET is in progress.
+ * This is not needed when running the emulation under QEMU
+ */
+
+ /*
+     * This is not real hardware. Nothing to be done unless running
+     * under KVM
+ */
+
+ if (spapr_xive_in_kernel(xive)) {
+ Error *local_err = NULL;
+
+ kvmppc_xive_sync_source(xive, lisn, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ return H_HARDWARE;
+ }
+ }
+ return H_SUCCESS;
+}
+
+/*
+ * The H_INT_RESET hcall() is used to reset all of the partition's
+ * interrupt exploitation structures to their initial state. This
+ * means losing all previously set interrupt state set via
+ * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ * Bits 0-63: Reserved
+ *
+ * Output:
+ * - None
+ */
+static target_ulong h_int_reset(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ SpaprXive *xive = spapr->xive;
+ target_ulong flags = args[0];
+
+ trace_spapr_xive_reset(flags);
+
+ if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ return H_FUNCTION;
+ }
+
+ if (flags) {
+ return H_PARAMETER;
+ }
+
+ device_cold_reset(DEVICE(xive));
+
+ if (spapr_xive_in_kernel(xive)) {
+ Error *local_err = NULL;
+
+ kvmppc_xive_reset(xive, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ return H_HARDWARE;
+ }
+ }
+ return H_SUCCESS;
+}
+
+void spapr_xive_hcall_init(SpaprMachineState *spapr)
+{
+ spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
+ spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
+ spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
+ spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
+ spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
+ spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
+ spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
+ h_int_set_os_reporting_line);
+ spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
+ h_int_get_os_reporting_line);
+ spapr_register_hypercall(H_INT_ESB, h_int_esb);
+ spapr_register_hypercall(H_INT_SYNC, h_int_sync);
+ spapr_register_hypercall(H_INT_RESET, h_int_reset);
+}
diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c
new file mode 100644
index 000000000..61fe7bd2d
--- /dev/null
+++ b/hw/intc/spapr_xive_kvm.c
@@ -0,0 +1,869 @@
+/*
+ * QEMU PowerPC sPAPR XIVE interrupt controller model
+ *
+ * Copyright (c) 2017-2019, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/spapr_xive.h"
+#include "hw/ppc/xive.h"
+#include "kvm_ppc.h"
+#include "trace.h"
+
+#include <sys/ioctl.h>
+
+/*
+ * Helpers for CPU hotplug
+ *
+ * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
+ */
+typedef struct KVMEnabledCPU {
+ unsigned long vcpu_id;
+ QLIST_ENTRY(KVMEnabledCPU) node;
+} KVMEnabledCPU;
+
+static QLIST_HEAD(, KVMEnabledCPU)
+ kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);
+
+static bool kvm_cpu_is_enabled(CPUState *cs)
+{
+ KVMEnabledCPU *enabled_cpu;
+ unsigned long vcpu_id = kvm_arch_vcpu_id(cs);
+
+ QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
+ if (enabled_cpu->vcpu_id == vcpu_id) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static void kvm_cpu_enable(CPUState *cs)
+{
+ KVMEnabledCPU *enabled_cpu;
+ unsigned long vcpu_id = kvm_arch_vcpu_id(cs);
+
+ enabled_cpu = g_malloc(sizeof(*enabled_cpu));
+ enabled_cpu->vcpu_id = vcpu_id;
+ QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
+}
+
+static void kvm_cpu_disable_all(void)
+{
+ KVMEnabledCPU *enabled_cpu, *next;
+
+ QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
+ QLIST_REMOVE(enabled_cpu, node);
+ g_free(enabled_cpu);
+ }
+}
+
+/*
+ * XIVE Thread Interrupt Management context (KVM)
+ */
+
+int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
+ uint64_t state[2];
+ int ret;
+
+ assert(xive->fd != -1);
+
+ /* word0 and word1 of the OS ring. */
+ state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);
+
+ ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
+ if (ret != 0) {
+ error_setg_errno(errp, -ret,
+ "XIVE: could not restore KVM state of CPU %ld",
+ kvm_arch_vcpu_id(tctx->cs));
+ return ret;
+ }
+
+ return 0;
+}
+
+int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
+ uint64_t state[2] = { 0 };
+ int ret;
+
+ assert(xive->fd != -1);
+
+ ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
+ if (ret != 0) {
+ error_setg_errno(errp, -ret,
+ "XIVE: could not capture KVM state of CPU %ld",
+ kvm_arch_vcpu_id(tctx->cs));
+ return ret;
+ }
+
+ /* word0 and word1 of the OS ring. */
+ *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];
+
+ return 0;
+}
+
+typedef struct {
+ XiveTCTX *tctx;
+ Error **errp;
+ int ret;
+} XiveCpuGetState;
+
+static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
+ run_on_cpu_data arg)
+{
+ XiveCpuGetState *s = arg.host_ptr;
+
+ s->ret = kvmppc_xive_cpu_get_state(s->tctx, s->errp);
+}
+
+int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
+{
+ XiveCpuGetState s = {
+ .tctx = tctx,
+ .errp = errp,
+ };
+
+ /*
+     * Kick the vCPU to make sure it is available for the KVM ioctl.
+ */
+ run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
+ RUN_ON_CPU_HOST_PTR(&s));
+
+ return s.ret;
+}
+
+int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
+{
+ ERRP_GUARD();
+ SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
+ unsigned long vcpu_id;
+ int ret;
+
+ assert(xive->fd != -1);
+
+ /* Check if CPU was hot unplugged and replugged. */
+ if (kvm_cpu_is_enabled(tctx->cs)) {
+ return 0;
+ }
+
+ vcpu_id = kvm_arch_vcpu_id(tctx->cs);
+
+ trace_kvm_xive_cpu_connect(vcpu_id);
+
+ ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
+ vcpu_id, 0);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "XIVE: unable to connect CPU%ld to KVM device",
+ vcpu_id);
+ if (ret == -ENOSPC) {
+ error_append_hint(errp, "Try -smp maxcpus=N with N < %u\n",
+ MACHINE(qdev_get_machine())->smp.max_cpus);
+ }
+ return ret;
+ }
+
+ kvm_cpu_enable(tctx->cs);
+ return 0;
+}
+
+/*
+ * XIVE Interrupt Source (KVM)
+ */
+
+int kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
+ Error **errp)
+{
+ uint32_t end_idx;
+ uint32_t end_blk;
+ uint8_t priority;
+ uint32_t server;
+ bool masked;
+ uint32_t eisn;
+ uint64_t kvm_src;
+
+ assert(xive_eas_is_valid(eas));
+
+ end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
+ end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
+ eisn = xive_get_field64(EAS_END_DATA, eas->w);
+ masked = xive_eas_is_masked(eas);
+
+ spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);
+
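+    /*
+     * Pack the source configuration (priority, server, masked flag and
+     * EISN) into the 64-bit word expected by the
+     * KVM_DEV_XIVE_GRP_SOURCE_CONFIG attribute.
+     */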
+ kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
+ KVM_XIVE_SOURCE_PRIORITY_MASK;
+ kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
+ KVM_XIVE_SOURCE_SERVER_MASK;
+ kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
+ KVM_XIVE_SOURCE_MASKED_MASK;
+ kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
+ KVM_XIVE_SOURCE_EISN_MASK;
+
+ return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
+ &kvm_src, true, errp);
+}
+
+void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
+{
+ kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
+ NULL, true, errp);
+}
+
+/*
+ * At reset, the interrupt sources are simply created and MASKED. We
+ * only need to inform the KVM XIVE device about their type: LSI or
+ * MSI.
+ */
+int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
+ uint64_t state = 0;
+
+ trace_kvm_xive_source_reset(srcno);
+
+ assert(xive->fd != -1);
+
+ if (xive_source_irq_is_lsi(xsrc, srcno)) {
+ state |= KVM_XIVE_LEVEL_SENSITIVE;
+ if (xive_source_is_asserted(xsrc, srcno)) {
+ state |= KVM_XIVE_LEVEL_ASSERTED;
+ }
+ }
+
+ return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
+ true, errp);
+}
+
+static int kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
+ int i;
+
+ for (i = 0; i < xsrc->nr_irqs; i++) {
+ int ret;
+
+ if (!xive_eas_is_valid(&xive->eat[i])) {
+ continue;
+ }
+
+ ret = kvmppc_xive_source_reset_one(xsrc, i, errp);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This is used to perform the magic loads on the ESB pages, described
+ * in xive.h.
+ *
+ * Memory barriers should not be needed for loads (no store for now).
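+ *
+ * A load at the XIVE_ESB_GET offset returns the PQ bits without side
+ * effect, while loads at the XIVE_ESB_SET_PQ_xx offsets atomically
+ * update the PQ bits and return their previous value.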
+ */
+static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
+ uint64_t data, bool write)
+{
+ uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
+ offset;
+
+ if (write) {
+ *addr = cpu_to_be64(data);
+ return -1;
+ } else {
+ /* Prevent the compiler from optimizing away the load */
+ volatile uint64_t value = be64_to_cpu(*addr);
+ return value;
+ }
+}
+
+static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
+{
+ return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3;
+}
+
+static void kvmppc_xive_esb_trigger(XiveSource *xsrc, int srcno)
+{
+ xive_esb_rw(xsrc, srcno, 0, 0, true);
+}
+
+uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
+ uint64_t data, bool write)
+{
+ if (write) {
+ return xive_esb_rw(xsrc, srcno, offset, data, 1);
+ }
+
+ /*
+ * Special Load EOI handling for LSI sources. Q bit is never set
+ * and the interrupt should be re-triggered if the level is still
+ * asserted.
+ */
+ if (xive_source_irq_is_lsi(xsrc, srcno) &&
+ offset == XIVE_ESB_LOAD_EOI) {
+ xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
+ if (xive_source_is_asserted(xsrc, srcno)) {
+ kvmppc_xive_esb_trigger(xsrc, srcno);
+ }
+ return 0;
+ } else {
+ return xive_esb_rw(xsrc, srcno, offset, 0, 0);
+ }
+}
+
+static void kvmppc_xive_source_get_state(XiveSource *xsrc)
+{
+ SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
+ int i;
+
+ for (i = 0; i < xsrc->nr_irqs; i++) {
+ uint8_t pq;
+
+ if (!xive_eas_is_valid(&xive->eat[i])) {
+ continue;
+ }
+
+ /* Perform a load without side effect to retrieve the PQ bits */
+ pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);
+
+ /* and save PQ locally */
+ xive_source_esb_set(xsrc, i, pq);
+ }
+}
+
+void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
+{
+ XiveSource *xsrc = opaque;
+
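+    /*
+     * MSI sources only react to a trigger (val != 0). LSI sources also
+     * record the line level so that the interrupt can be re-sent when
+     * it is EOIed while the line is still asserted.
+     */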
+ if (!xive_source_irq_is_lsi(xsrc, srcno)) {
+ if (!val) {
+ return;
+ }
+ } else {
+ xive_source_set_asserted(xsrc, srcno, val);
+ }
+
+ kvmppc_xive_esb_trigger(xsrc, srcno);
+}
+
+/*
+ * sPAPR XIVE interrupt controller (KVM)
+ */
+int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
+ uint32_t end_idx, XiveEND *end,
+ Error **errp)
+{
+ struct kvm_ppc_xive_eq kvm_eq = { 0 };
+ uint64_t kvm_eq_idx;
+ uint8_t priority;
+ uint32_t server;
+ int ret;
+
+ assert(xive_end_is_valid(end));
+
+ /* Encode the tuple (server, prio) as a KVM EQ index */
+ spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);
+
+ kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
+ KVM_XIVE_EQ_PRIORITY_MASK;
+ kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
+ KVM_XIVE_EQ_SERVER_MASK;
+
+ ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
+ &kvm_eq, false, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /*
+ * The EQ index and toggle bit are updated by HW. These are the
+ * only fields from KVM we want to update QEMU with. The other END
+ * fields should already be in the QEMU END table.
+ */
+ end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
+ xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);
+
+ return 0;
+}
+
+int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
+ uint32_t end_idx, XiveEND *end,
+ Error **errp)
+{
+ struct kvm_ppc_xive_eq kvm_eq = { 0 };
+ uint64_t kvm_eq_idx;
+ uint8_t priority;
+ uint32_t server;
+
+ /*
+ * Build the KVM state from the local END structure.
+ */
+
+ kvm_eq.flags = 0;
+ if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
+ kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
+ }
+
+ /*
+ * If the hcall is disabling the EQ, set the size and page address
+ * to zero. When migrating, only valid ENDs are taken into
+ * account.
+ */
+ if (xive_end_is_valid(end)) {
+ kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
+ kvm_eq.qaddr = xive_end_qaddr(end);
+ /*
+ * The EQ toggle bit and index should only be relevant when
+ * restoring the EQ state
+ */
+ kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
+ kvm_eq.qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+ } else {
+ kvm_eq.qshift = 0;
+ kvm_eq.qaddr = 0;
+ }
+
+ /* Encode the tuple (server, prio) as a KVM EQ index */
+ spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);
+
+ kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
+ KVM_XIVE_EQ_PRIORITY_MASK;
+ kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
+ KVM_XIVE_EQ_SERVER_MASK;
+
+ return
+ kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
+ &kvm_eq, true, errp);
+}
+
+void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
+{
+ kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
+ NULL, true, errp);
+}
+
+static int kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < xive->nr_ends; i++) {
+ if (!xive_end_is_valid(&xive->endt[i])) {
+ continue;
+ }
+
+ ret = kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
+ &xive->endt[i], errp);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The primary goal of the XIVE VM change handler is to mark the EQ
+ * pages dirty when all XIVE event notifications have stopped.
+ *
+ * Whenever the VM is stopped, the VM change handler sets the source
+ * PQs to PENDING to stop the flow of events and to possibly catch a
+ * triggered interrupt occurring while the VM is stopped. The previous
+ * state is saved in anticipation of a migration. The XIVE controller
+ * is then synced through KVM to flush any in-flight event
+ * notification and stabilize the EQs.
+ *
+ * At this stage, we can mark the EQ page dirty and let a migration
+ * sequence transfer the EQ pages to the destination, which is done
+ * just after the stop state.
+ *
+ * The previous configuration of the sources is restored when the VM
+ * runs again. If an interrupt was queued while the VM was stopped,
+ * simply generate a trigger.
+ */
+static void kvmppc_xive_change_state_handler(void *opaque, bool running,
+ RunState state)
+{
+ SpaprXive *xive = opaque;
+ XiveSource *xsrc = &xive->source;
+ Error *local_err = NULL;
+ int i;
+
+ /*
+ * Restore the sources to their initial state. This is called when
+ * the VM resumes after a stop or a migration.
+ */
+ if (running) {
+ for (i = 0; i < xsrc->nr_irqs; i++) {
+ uint8_t pq;
+ uint8_t old_pq;
+
+ if (!xive_eas_is_valid(&xive->eat[i])) {
+ continue;
+ }
+
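+            /*
+             * Restore the PQ state saved when the VM was stopped: the
+             * XIVE_ESB_SET_PQ_xx offsets are 0x100 apart, so the saved
+             * PQ value indexes them directly. The load returns the PQ
+             * bits that accumulated while the VM was stopped.
+             */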
+ pq = xive_source_esb_get(xsrc, i);
+ old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));
+
+ /*
+ * An interrupt was queued while the VM was stopped,
+ * generate a trigger.
+ */
+ if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
+ kvmppc_xive_esb_trigger(xsrc, i);
+ }
+ }
+
+ return;
+ }
+
+ /*
+ * Mask the sources, to stop the flow of event notifications, and
+ * save the PQs locally in the XiveSource object. The XiveSource
+ * state will be collected later on by its vmstate handler if a
+ * migration is in progress.
+ */
+ for (i = 0; i < xsrc->nr_irqs; i++) {
+ uint8_t pq;
+
+ if (!xive_eas_is_valid(&xive->eat[i])) {
+ continue;
+ }
+
+ pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);
+
+ /*
+ * PQ is set to PENDING to possibly catch a triggered
+         * interrupt occurring while the VM is stopped (hotplug event
+         * for instance).
+ */
+ if (pq != XIVE_ESB_OFF) {
+ pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
+ }
+ xive_source_esb_set(xsrc, i, pq);
+ }
+
+ /*
+ * Sync the XIVE controller in KVM, to flush in-flight event
+ * notification that should be enqueued in the EQs and mark the
+ * XIVE EQ pages dirty to collect all updates.
+ */
+ kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
+ KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ return;
+ }
+}
+
+void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
+{
+ assert(xive->fd != -1);
+
+ /*
+ * When the VM is stopped, the sources are masked and the previous
+ * state is saved in anticipation of a migration. We should not
+     * synchronize the source state in that case, otherwise we would
+     * override the saved state.
+ */
+ if (runstate_is_running()) {
+ kvmppc_xive_source_get_state(&xive->source);
+ }
+
+ /* EAT: there is no extra state to query from KVM */
+
+ /* ENDT */
+ kvmppc_xive_get_queues(xive, errp);
+}
+
+/*
+ * The SpaprXive 'pre_save' method is called by the vmstate handler of
+ * the SpaprXive model, after the XIVE controller is synced in the VM
+ * change handler.
+ */
+int kvmppc_xive_pre_save(SpaprXive *xive)
+{
+ Error *local_err = NULL;
+ int ret;
+
+ assert(xive->fd != -1);
+
+ /* EAT: there is no extra state to query from KVM */
+
+ /* ENDT */
+ ret = kvmppc_xive_get_queues(xive, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The SpaprXive 'post_load' method is not called by a vmstate
+ * handler. It is called at the sPAPR machine level at the end of the
+ * migration sequence by the sPAPR IRQ backend 'post_load' method,
+ * when all XIVE states have been transferred and loaded.
+ */
+int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
+{
+ Error *local_err = NULL;
+ CPUState *cs;
+ int i;
+ int ret;
+
+ /* The KVM XIVE device should be in use */
+ assert(xive->fd != -1);
+
+    /* Restore the ENDT first. The targeting depends on it. */
+ for (i = 0; i < xive->nr_ends; i++) {
+ if (!xive_end_is_valid(&xive->endt[i])) {
+ continue;
+ }
+
+ ret = kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
+ &xive->endt[i], &local_err);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ /* Restore the EAT */
+ for (i = 0; i < xive->nr_irqs; i++) {
+ if (!xive_eas_is_valid(&xive->eat[i])) {
+ continue;
+ }
+
+ /*
+ * We can only restore the source config if the source has been
+ * previously set in KVM. Since we don't do that for all interrupts
+ * at reset time anymore, let's do it now.
+ */
+ ret = kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ /*
+ * Restore the thread interrupt contexts of initial CPUs.
+ *
+ * The context of hotplugged CPUs is restored later, by the
+ * 'post_load' handler of the XiveTCTX model because they are not
+ * available at the time the SpaprXive 'post_load' method is
+ * called. We cannot restore the context of all CPUs in the
+ * 'post_load' handler of XiveTCTX because the machine is not
+ * necessarily connected to the KVM device at that time.
+ */
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ ret = kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ /* The source states will be restored when the machine starts running */
+ return 0;
+
+fail:
+ error_report_err(local_err);
+ return ret;
+}
+
+/* Returns MAP_FAILED on error and sets errno */
+static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
+ Error **errp)
+{
+ void *addr;
+ uint32_t page_shift = 16; /* TODO: fix page_shift */
+
+ addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
+ pgoff << page_shift);
+ if (addr == MAP_FAILED) {
+ error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
+ }
+
+ return addr;
+}
+
+/*
+ * All the XIVE memory regions are now backed by mappings from the KVM
+ * XIVE device.
+ */
+int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
+ Error **errp)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+ XiveSource *xsrc = &xive->source;
+ size_t esb_len = xive_source_esb_len(xsrc);
+ size_t tima_len = 4ull << TM_SHIFT;
+ CPUState *cs;
+ int fd;
+ void *addr;
+ int ret;
+
+ /*
+     * The KVM XIVE device is already in use. This is the case when
+ * rebooting under the XIVE-only interrupt mode.
+ */
+ if (xive->fd != -1) {
+ return 0;
+ }
+
+ if (!kvmppc_has_cap_xive()) {
+ error_setg(errp, "IRQ_XIVE capability must be present for KVM");
+ return -1;
+ }
+
+ /* First, create the KVM XIVE device */
+ fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
+ if (fd < 0) {
+ error_setg_errno(errp, -fd, "XIVE: error creating KVM device");
+ return -1;
+ }
+ xive->fd = fd;
+
+ /* Tell KVM about the # of VCPUs we may have */
+ if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
+ KVM_DEV_XIVE_NR_SERVERS)) {
+ ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
+ KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true,
+ errp);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ /*
+ * 1. Source ESB pages - KVM mapping
+ */
+ addr = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, errp);
+ if (addr == MAP_FAILED) {
+ goto fail;
+ }
+ xsrc->esb_mmap = addr;
+
+ memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
+ "xive.esb-kvm", esb_len, xsrc->esb_mmap);
+ memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
+ &xsrc->esb_mmio_kvm, 1);
+
+ /*
+ * 2. END ESB pages (No KVM support yet)
+ */
+
+ /*
+ * 3. TIMA pages - KVM mapping
+ */
+ addr = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, errp);
+ if (addr == MAP_FAILED) {
+ goto fail;
+ }
+ xive->tm_mmap = addr;
+
+ memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
+ "xive.tima", tima_len, xive->tm_mmap);
+ memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
+ &xive->tm_mmio_kvm, 1);
+
+ xive->change = qemu_add_vm_change_state_handler(
+ kvmppc_xive_change_state_handler, xive);
+
+ /* Connect the presenters to the initial VCPUs of the machine */
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ ret = kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, errp);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ /* Update the KVM sources */
+ ret = kvmppc_xive_source_reset(xsrc, errp);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ kvm_kernel_irqchip = true;
+ kvm_msi_via_irqfd_allowed = true;
+ kvm_gsi_direct_mapping = true;
+ return 0;
+
+fail:
+ kvmppc_xive_disconnect(intc);
+ return -1;
+}
+
+void kvmppc_xive_disconnect(SpaprInterruptController *intc)
+{
+ SpaprXive *xive = SPAPR_XIVE(intc);
+ XiveSource *xsrc;
+ size_t esb_len;
+
+ assert(xive->fd != -1);
+
+ /* Clear the KVM mapping */
+ xsrc = &xive->source;
+ esb_len = xive_source_esb_len(xsrc);
+
+ if (xsrc->esb_mmap) {
+ memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
+ object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
+ munmap(xsrc->esb_mmap, esb_len);
+ xsrc->esb_mmap = NULL;
+ }
+
+ if (xive->tm_mmap) {
+ memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
+ object_unparent(OBJECT(&xive->tm_mmio_kvm));
+ munmap(xive->tm_mmap, 4ull << TM_SHIFT);
+ xive->tm_mmap = NULL;
+ }
+
+ /*
+ * When the KVM device fd is closed, the KVM device is destroyed
+ * and removed from the list of devices of the VM. The VCPU
+ * presenters are also detached from the device.
+ */
+ close(xive->fd);
+ xive->fd = -1;
+
+ kvm_kernel_irqchip = false;
+ kvm_msi_via_irqfd_allowed = false;
+ kvm_gsi_direct_mapping = false;
+
+ /* Clear the local list of presenter (hotplug) */
+    /* Clear the local list of presenters (hotplug) */
+
+ /* VM Change state handler is not needed anymore */
+ if (xive->change) {
+ qemu_del_vm_change_state_handler(xive->change);
+ xive->change = NULL;
+ }
+}
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
new file mode 100644
index 000000000..9aba7e3a7
--- /dev/null
+++ b/hw/intc/trace-events
@@ -0,0 +1,248 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# i8259.c
+pic_update_irq(bool master, uint8_t imr, uint8_t irr, uint8_t padd) "master %d imr %"PRIu8" irr %"PRIu8" padd %"PRIu8
+pic_set_irq(bool master, int irq, int level) "master %d irq %d level %d"
+pic_interrupt(int irq, int intno) "irq %d intno %d"
+pic_ioport_write(bool master, uint64_t addr, uint64_t val) "master %d addr 0x%"PRIx64" val 0x%"PRIx64
+pic_ioport_read(bool master, uint64_t addr, int val) "master %d addr 0x%"PRIx64" val 0x%x"
+
+# apic_common.c
+cpu_set_apic_base(uint64_t val) "0x%016"PRIx64
+cpu_get_apic_base(uint64_t val) "0x%016"PRIx64
+# coalescing
+apic_report_irq_delivered(int apic_irq_delivered) "coalescing %d"
+apic_reset_irq_delivered(int apic_irq_delivered) "old coalescing %d"
+apic_get_irq_delivered(int apic_irq_delivered) "returning coalescing %d"
+
+# apic.c
+apic_local_deliver(int vector, uint32_t lvt) "vector %d delivery mode %d"
+apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, uint8_t trigger_mode) "dest %d dest_mode %d delivery_mode %d vector %d trigger_mode %d"
+apic_mem_readl(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x"
+apic_mem_writel(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x"
+
+# ioapic.c
+ioapic_set_remote_irr(int n) "set remote irr for pin %d"
+ioapic_clear_remote_irr(int n, int vector) "clear remote irr for pin %d vector %d"
+ioapic_eoi_broadcast(int vector) "EOI broadcast for vector %d"
+ioapic_eoi_delayed_reassert(int vector) "delayed reassert on EOI broadcast for vector %d"
+ioapic_mem_read(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem read addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" retval 0x%"PRIx32
+ioapic_mem_write(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem write addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" val 0x%"PRIx32
+ioapic_set_irq(int vector, int level) "vector: %d level: %d"
+
+# slavio_intctl.c
+slavio_intctl_mem_readl(uint32_t cpu, uint64_t addr, uint32_t ret) "read cpu %d reg 0x%"PRIx64" = 0x%x"
+slavio_intctl_mem_writel(uint32_t cpu, uint64_t addr, uint32_t val) "write cpu %d reg 0x%"PRIx64" = 0x%x"
+slavio_intctl_mem_writel_clear(uint32_t cpu, uint32_t val, uint32_t intreg_pending) "Cleared cpu %d irq mask 0x%x, curmask 0x%x"
+slavio_intctl_mem_writel_set(uint32_t cpu, uint32_t val, uint32_t intreg_pending) "Set cpu %d irq mask 0x%x, curmask 0x%x"
+slavio_intctlm_mem_readl(uint64_t addr, uint32_t ret) "read system reg 0x%"PRIx64" = 0x%x"
+slavio_intctlm_mem_writel(uint64_t addr, uint32_t val) "write system reg 0x%"PRIx64" = 0x%x"
+slavio_intctlm_mem_writel_enable(uint32_t val, uint32_t intregm_disabled) "Enabled master irq mask 0x%x, curmask 0x%x"
+slavio_intctlm_mem_writel_disable(uint32_t val, uint32_t intregm_disabled) "Disabled master irq mask 0x%x, curmask 0x%x"
+slavio_intctlm_mem_writel_target(uint32_t cpu) "Set master irq cpu %d"
+slavio_check_interrupts(uint32_t pending, uint32_t intregm_disabled) "pending 0x%x disabled 0x%x"
+slavio_set_irq(uint32_t target_cpu, int irq, uint32_t pil, int level) "Set cpu %d irq %d -> pil %d level %d"
+slavio_set_timer_irq_cpu(int cpu, int level) "Set cpu %d local timer level %d"
+
+# grlib_irqmp.c
+grlib_irqmp_check_irqs(uint32_t pend, uint32_t force, uint32_t mask, uint32_t lvl1, uint32_t lvl2) "pend:0x%04x force:0x%04x mask:0x%04x lvl1:0x%04x lvl0:0x%04x"
+grlib_irqmp_ack(int intno) "interrupt:%d"
+grlib_irqmp_set_irq(int irq) "Raise CPU IRQ %d"
+grlib_irqmp_readl_unknown(uint64_t addr) "addr 0x%"PRIx64
+grlib_irqmp_writel_unknown(uint64_t addr, uint32_t value) "addr 0x%"PRIx64" value 0x%x"
+
+# xics.c
+xics_icp_check_ipi(int server, uint8_t mfrr) "CPU %d can take IPI mfrr=0x%x"
+xics_icp_accept(uint32_t old_xirr, uint32_t new_xirr) "icp_accept: XIRR 0x%"PRIx32"->0x%"PRIx32
+xics_icp_eoi(int server, uint32_t xirr, uint32_t new_xirr) "icp_eoi: server %d given XIRR 0x%"PRIx32" new XIRR 0x%"PRIx32
+xics_icp_irq(int server, int nr, uint8_t priority) "cpu %d trying to deliver irq 0x%"PRIx32" priority 0x%x"
+xics_icp_raise(uint32_t xirr, uint8_t pending_priority) "raising IRQ new XIRR=0x%x new pending priority=0x%x"
+xics_ics_set_irq_msi(int srcno, int nr) "set_irq_msi: srcno %d [irq 0x%x]"
+xics_masked_pending(void) "set_irq_msi: masked pending"
+xics_ics_set_irq_lsi(int srcno, int nr) "set_irq_lsi: srcno %d [irq 0x%x]"
+xics_ics_write_xive(int nr, int srcno, int server, uint8_t priority) "ics_write_xive: irq 0x%x [src %d] server 0x%x prio 0x%x"
+xics_ics_reject(int nr, int srcno) "reject irq 0x%x [src %d]"
+xics_ics_eoi(int nr) "ics_eoi: irq 0x%x"
+
+# s390_flic_kvm.c
+flic_create_device(int err) "flic: create device failed %d"
+flic_reset_failed(int err) "flic: reset failed %d"
+
+# s390_flic.c
+qemu_s390_airq_suppressed(uint8_t type, uint8_t isc) "flic: adapter I/O interrupt suppressed (type 0x%x isc 0x%x)"
+qemu_s390_suppress_airq(uint8_t isc, const char *from, const char *to) "flic: for isc 0x%x, suppress airq by modifying ais mode from %s to %s"
+
+# aspeed_vic.c
+aspeed_vic_set_irq(int irq, int level) "Enabling IRQ %d: %d"
+aspeed_vic_update_fiq(int flags) "Raising FIQ: %d"
+aspeed_vic_update_irq(int flags) "Raising IRQ: %d"
+aspeed_vic_read(uint64_t offset, unsigned size, uint32_t value) "From 0x%" PRIx64 " of size %u: 0x%" PRIx32
+aspeed_vic_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32
+
+# arm_gic.c
+gic_enable_irq(int irq) "irq %d enabled"
+gic_disable_irq(int irq) "irq %d disabled"
+gic_set_irq(int irq, int level, int cpumask, int target) "irq %d level %d cpumask 0x%x target 0x%x"
+gic_update_bestirq(const char *s, int cpu, int irq, int prio, int priority_mask, int running_priority) "%s %d irq %d priority %d cpu priority mask %d cpu running priority %d"
+gic_update_set_irq(int cpu, const char *name, int level) "cpu[%d]: %s = %d"
+gic_acknowledge_irq(const char *s, int cpu, int irq) "%s %d acknowledged irq %d"
+gic_cpu_write(const char *s, int cpu, int addr, uint32_t val) "%s %d iface write at 0x%08x 0x%08" PRIx32
+gic_cpu_read(const char *s, int cpu, int addr, uint32_t val) "%s %d iface read at 0x%08x: 0x%08" PRIx32
+gic_hyp_read(int addr, uint32_t val) "hyp read at 0x%08x: 0x%08" PRIx32
+gic_hyp_write(int addr, uint32_t val) "hyp write at 0x%08x: 0x%08" PRIx32
+gic_dist_read(int addr, unsigned int size, uint32_t val) "dist read at 0x%08x size %u: 0x%08" PRIx32
+gic_dist_write(int addr, unsigned int size, uint32_t val) "dist write at 0x%08x size %u: 0x%08" PRIx32
+gic_lr_entry(int cpu, int entry, uint32_t val) "cpu %d: new lr entry %d: 0x%08" PRIx32
+gic_update_maintenance_irq(int cpu, int val) "cpu %d: maintenance = %d"
+
+# arm_gicv3_cpuif.c
+gicv3_icc_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_pmr_write(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR write cpu 0x%x value 0x%" PRIx64
+gicv3_icc_bpr_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_BPR%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_bpr_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_BPR%d write cpu 0x%x value 0x%" PRIx64
+gicv3_icc_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICC_AP%dR%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICC_AP%dR%d write cpu 0x%x value 0x%" PRIx64
+gicv3_icc_igrpen_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_igrpen_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN%d write cpu 0x%x value 0x%" PRIx64
+gicv3_icc_igrpen1_el3_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN1_EL3 read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_igrpen1_el3_write(uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN1_EL3 write cpu 0x%x value 0x%" PRIx64
+gicv3_icc_ctlr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_ctlr_write(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR write cpu 0x%x value 0x%" PRIx64
+gicv3_icc_ctlr_el3_read(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR_EL3 read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_ctlr_el3_write(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR_EL3 write cpu 0x%x value 0x%" PRIx64
+gicv3_cpuif_update(uint32_t cpuid, int irq, int grp, int prio) "GICv3 CPU i/f 0x%x HPPI update: irq %d group %d prio %d"
+gicv3_cpuif_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f 0x%x HPPI update: setting FIQ %d IRQ %d"
+gicv3_icc_generate_sgi(uint32_t cpuid, int irq, int irm, uint32_t aff, uint32_t targetlist) "GICv3 CPU i/f 0x%x generating SGI %d IRM %d target affinity 0x%xxx targetlist 0x%x"
+gicv3_icc_iar0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR0 read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_iar1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR1 read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_EOIR%d write cpu 0x%x value 0x%" PRIx64
+gicv3_icc_hppir0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR0 read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_hppir1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR1 read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICC_DIR write cpu 0x%x value 0x%" PRIx64
+gicv3_icc_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_RPR read cpu 0x%x value 0x%" PRIx64
+gicv3_ich_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_AP%dR%d read cpu 0x%x value 0x%" PRIx64
+gicv3_ich_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_AP%dR%d write cpu 0x%x value 0x%" PRIx64
+gicv3_ich_hcr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_HCR_EL2 read cpu 0x%x value 0x%" PRIx64
+gicv3_ich_hcr_write(uint32_t cpu, uint64_t val) "GICv3 ICH_HCR_EL2 write cpu 0x%x value 0x%" PRIx64
+gicv3_ich_vmcr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_VMCR_EL2 read cpu 0x%x value 0x%" PRIx64
+gicv3_ich_vmcr_write(uint32_t cpu, uint64_t val) "GICv3 ICH_VMCR_EL2 write cpu 0x%x value 0x%" PRIx64
+gicv3_ich_lr_read(int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_LR%d_EL2 read cpu 0x%x value 0x%" PRIx64
+gicv3_ich_lr32_read(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LR%d read cpu 0x%x value 0x%" PRIx32
+gicv3_ich_lrc_read(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LRC%d read cpu 0x%x value 0x%" PRIx32
+gicv3_ich_lr_write(int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_LR%d_EL2 write cpu 0x%x value 0x%" PRIx64
+gicv3_ich_lr32_write(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LR%d write cpu 0x%x value 0x%" PRIx32
+gicv3_ich_lrc_write(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LRC%d write cpu 0x%x value 0x%" PRIx32
+gicv3_ich_vtr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_VTR read cpu 0x%x value 0x%" PRIx64
+gicv3_ich_misr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_MISR read cpu 0x%x value 0x%" PRIx64
+gicv3_ich_eisr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_EISR read cpu 0x%x value 0x%" PRIx64
+gicv3_ich_elrsr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_ELRSR read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICV_AP%dR%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICV_AP%dR%d write cpu 0x%x value 0x%" PRIx64
+gicv3_icv_bpr_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_BPR%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_bpr_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_BPR%d write cpu 0x%x value 0x%" PRIx64
+gicv3_icv_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_PMR read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_pmr_write(uint32_t cpu, uint64_t val) "GICv3 ICV_PMR write cpu 0x%x value 0x%" PRIx64
+gicv3_icv_igrpen_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IGRPEN%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_igrpen_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IGRPEN%d write cpu 0x%x value 0x%" PRIx64
+gicv3_icv_ctlr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_CTLR read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_ctlr_write(uint32_t cpu, uint64_t val) "GICv3 ICV_CTLR write cpu 0x%x value 0x%" PRIx64
+gicv3_icv_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_RPR read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_hppir_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_HPPIR%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICV_DIR write cpu 0x%x value 0x%" PRIx64
+gicv3_icv_iar_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IAR%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d write cpu 0x%x value 0x%" PRIx64
+gicv3_cpuif_virt_update(uint32_t cpuid, int idx) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d"
+gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d maintenance-irq %d"
+
+# arm_gicv3_dist.c
+gicv3_dist_read(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"
+gicv3_dist_badread(uint64_t offset, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " size %u secure %d: error"
+gicv3_dist_write(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"
+gicv3_dist_badwrite(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d: error"
+gicv3_dist_set_irq(int irq, int level) "GICv3 distributor interrupt %d level changed to %d"
+
+# arm_gicv3_redist.c
+gicv3_redist_read(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 redistributor 0x%x read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"
+gicv3_redist_badread(uint32_t cpu, uint64_t offset, unsigned size, bool secure) "GICv3 redistributor 0x%x read: offset 0x%" PRIx64 " size %u secure %d: error"
+gicv3_redist_write(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 redistributor 0x%x write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"
+gicv3_redist_badwrite(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 redistributor 0x%x write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d: error"
+gicv3_redist_set_irq(uint32_t cpu, int irq, int level) "GICv3 redistributor 0x%x interrupt %d level changed to %d"
+gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor 0x%x pending SGI %d"
+
+# armv7m_nvic.c
+nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d"
+nvic_recompute_state_secure(int vectpending, bool vectpending_is_s_banked, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d is_s_banked %d vectpending_prio %d exception_prio %d"
+nvic_set_prio(int irq, bool secure, uint8_t prio) "NVIC set irq %d secure-bank %d priority %d"
+nvic_irq_update(int vectpending, int pendprio, int exception_prio, int level) "NVIC vectpending %d pending prio %d exception_prio %d: setting irq line to %d"
+nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to HardFault: insufficient priority %d >= %d"
+nvic_escalate_disabled(int irq) "NVIC escalating irq %d to HardFault: disabled"
+nvic_set_pending(int irq, bool secure, bool targets_secure, bool derived, int en, int prio) "NVIC set pending irq %d secure-bank %d targets_secure %d derived %d (enabled: %d priority %d)"
+nvic_clear_pending(int irq, bool secure, int en, int prio) "NVIC clear pending irq %d secure-bank %d (enabled: %d priority %d)"
+nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (prio %d)"
+nvic_get_pending_irq_info(int irq, bool secure) "NVIC next IRQ %d: targets_secure: %d"
+nvic_complete_irq(int irq, bool secure) "NVIC complete IRQ %d (secure %d)"
+nvic_set_irq_level(int irq, int level) "NVIC external irq %d level set to %d"
+nvic_set_nmi_level(int level) "NVIC external NMI level set to %d"
+nvic_sysreg_read(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
+nvic_sysreg_write(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
+
+# heathrow_pic.c
+heathrow_write(uint64_t addr, unsigned int n, uint64_t value) "0x%"PRIx64" %u: 0x%"PRIx64
+heathrow_read(uint64_t addr, unsigned int n, uint64_t value) "0x%"PRIx64" %u: 0x%"PRIx64
+heathrow_set_irq(int num, int level) "set_irq: num=0x%02x level=%d"
+
+# bcm2835_ic.c
+bcm2835_ic_set_gpu_irq(int irq, int level) "GPU irq #%d level %d"
+bcm2835_ic_set_cpu_irq(int irq, int level) "CPU irq #%d level %d"
+
+# spapr_xive.c
+spapr_xive_claim_irq(uint32_t lisn, bool lsi) "lisn=0x%x lsi=%d"
+spapr_xive_free_irq(uint32_t lisn) "lisn=0x%x"
+spapr_xive_set_irq(uint32_t lisn, uint32_t val) "lisn=0x%x val=%d"
+spapr_xive_get_source_info(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64
+spapr_xive_set_source_config(uint64_t flags, uint64_t lisn, uint64_t target, uint64_t priority, uint64_t eisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64" eisn=0x%"PRIx64
+spapr_xive_get_source_config(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64
+spapr_xive_get_queue_info(uint64_t flags, uint64_t target, uint64_t priority) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64
+spapr_xive_set_queue_config(uint64_t flags, uint64_t target, uint64_t priority, uint64_t qpage, uint64_t qsize) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64" qpage=0x%"PRIx64" qsize=0x%"PRIx64
+spapr_xive_get_queue_config(uint64_t flags, uint64_t target, uint64_t priority) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64
+spapr_xive_set_os_reporting_line(uint64_t flags) "flags=0x%"PRIx64
+spapr_xive_get_os_reporting_line(uint64_t flags) "flags=0x%"PRIx64
+spapr_xive_esb(uint64_t flags, uint64_t lisn, uint64_t offset, uint64_t data) "flags=0x%"PRIx64" lisn=0x%"PRIx64" offset=0x%"PRIx64" data=0x%"PRIx64
+spapr_xive_sync(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64
+spapr_xive_reset(uint64_t flags) "flags=0x%"PRIx64
+
+# spapr_xive_kvm.c
+kvm_xive_cpu_connect(uint32_t id) "connect CPU%d to KVM device"
+kvm_xive_source_reset(uint32_t srcno) "IRQ 0x%x"
+
+# xive.c
+xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK"
+xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !"
+xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x"
+xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64
+xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64
+xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x"
+xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x"
+xive_tctx_tm_write(uint64_t offset, unsigned int size, uint64_t value) "@0x%"PRIx64" sz=%d val=0x%" PRIx64
+xive_tctx_tm_read(uint64_t offset, unsigned int size, uint64_t value) "@0x%"PRIx64" sz=%d val=0x%" PRIx64
+xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x"
+xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64
+
+# pnv_xive.c
+pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64
+
+# goldfish_pic.c
+goldfish_irq_request(void *dev, int idx, int irq, int level) "pic: %p goldfish-irq.%d irq: %d level: %d"
+goldfish_pic_read(void *dev, int idx, unsigned int addr, unsigned int size, uint64_t value) "pic: %p goldfish-irq.%d reg: 0x%02x size: %d value: 0x%"PRIx64
+goldfish_pic_write(void *dev, int idx, unsigned int addr, unsigned int size, uint64_t value) "pic: %p goldfish-irq.%d reg: 0x%02x size: %d value: 0x%"PRIx64
+goldfish_pic_reset(void *dev, int idx) "pic: %p goldfish-irq.%d"
+goldfish_pic_realize(void *dev, int idx) "pic: %p goldfish-irq.%d"
+goldfish_pic_instance_init(void *dev) "pic: %p goldfish-irq"
+
+# sh_intc.c
+sh_intc_sources(int p, int a, int c, int m, unsigned short v, const char *s1, const char *s2, const char *s3) "(%d/%d/%d/%d) interrupt source 0x%x %s%s%s"
+sh_intc_pending(int p, unsigned short v) "(%d) returning interrupt source 0x%x"
+sh_intc_register(const char *s, int id, unsigned short v, int c, int m) "%s %u -> 0x%04x (%d/%d)"
+sh_intc_read(unsigned size, uint64_t offset, unsigned long val) "size %u 0x%" PRIx64 " -> 0x%lx"
+sh_intc_write(unsigned size, uint64_t offset, unsigned long val) "size %u 0x%" PRIx64 " <- 0x%lx"
+sh_intc_set(int id, int enable) "setting interrupt group %d to %d"
diff --git a/hw/intc/trace.h b/hw/intc/trace.h
new file mode 100644
index 000000000..02394aea2
--- /dev/null
+++ b/hw/intc/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_intc.h"
diff --git a/hw/intc/vgic_common.h b/hw/intc/vgic_common.h
new file mode 100644
index 000000000..80d919eb9
--- /dev/null
+++ b/hw/intc/vgic_common.h
@@ -0,0 +1,35 @@
+/*
+ * ARM KVM vGIC utility functions
+ *
+ * Copyright (c) 2015 Samsung Electronics
+ * Written by Pavel Fedin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_ARM_VGIC_COMMON_H
+#define QEMU_ARM_VGIC_COMMON_H
+
+/**
+ * kvm_arm_gic_set_irq - Send an IRQ to the in-kernel vGIC
+ * @num_irq: Total number of IRQs configured for the GIC instance
+ * @irq: qemu internal IRQ line number:
+ * [0..N-1] : external interrupts
+ * [N..N+31] : PPI (internal) interrupts for CPU 0
+ * [N+32..N+63] : PPI (internal) interrupts for CPU 1
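+ *   (e.g. PPI 3 of CPU 1 is passed as irq = N + 32 + 3)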
+ * @level: level of the IRQ line.
+ */
+void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level);
+
+#endif
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
new file mode 100644
index 000000000..48a835eab
--- /dev/null
+++ b/hw/intc/xics.c
@@ -0,0 +1,751 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
+ *
+ * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "trace.h"
+#include "qemu/timer.h"
+#include "hw/ppc/xics.h"
+#include "hw/qdev-properties.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/visitor.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "hw/intc/intc.h"
+#include "hw/irq.h"
+#include "sysemu/kvm.h"
+#include "sysemu/reset.h"
+
+void icp_pic_print_info(ICPState *icp, Monitor *mon)
+{
+ int cpu_index;
+
+ /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
+ * are hot plugged or unplugged.
+ */
+ if (!icp) {
+ return;
+ }
+
+ cpu_index = icp->cs ? icp->cs->cpu_index : -1;
+
+ if (!icp->output) {
+ return;
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ icp_synchronize_state(icp);
+ }
+
+ monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
+ cpu_index, icp->xirr, icp->xirr_owner,
+ icp->pending_priority, icp->mfrr);
+}
+
+void ics_pic_print_info(ICSState *ics, Monitor *mon)
+{
+ uint32_t i;
+
+ monitor_printf(mon, "ICS %4x..%4x %p\n",
+ ics->offset, ics->offset + ics->nr_irqs - 1, ics);
+
+ if (!ics->irqs) {
+ return;
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ ics_synchronize_state(ics);
+ }
+
+ for (i = 0; i < ics->nr_irqs; i++) {
+ ICSIRQState *irq = ics->irqs + i;
+
+ if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
+ continue;
+ }
+ monitor_printf(mon, " %4x %s %02x %02x\n",
+ ics->offset + i,
+ (irq->flags & XICS_FLAGS_IRQ_LSI) ?
+ "LSI" : "MSI",
+ irq->priority, irq->status);
+ }
+}
+
+/*
+ * ICP: Presentation layer
+ */
+
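+/*
+ * The XIRR is a 32-bit register: the CPPR (Current Processor Priority
+ * Register) occupies the most significant byte and the XISR (pending
+ * interrupt source number) the low 24 bits.
+ */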
+#define XISR_MASK 0x00ffffff
+#define CPPR_MASK 0xff000000
+
+#define XISR(icp) (((icp)->xirr) & XISR_MASK)
+#define CPPR(icp) (((icp)->xirr) >> 24)
+
+static void ics_reject(ICSState *ics, uint32_t nr);
+static void ics_eoi(ICSState *ics, uint32_t nr);
+
+static void icp_check_ipi(ICPState *icp)
+{
+ if (XISR(icp) && (icp->pending_priority <= icp->mfrr)) {
+ return;
+ }
+
+ trace_xics_icp_check_ipi(icp->cs->cpu_index, icp->mfrr);
+
+ if (XISR(icp) && icp->xirr_owner) {
+ ics_reject(icp->xirr_owner, XISR(icp));
+ }
+
+ icp->xirr = (icp->xirr & ~XISR_MASK) | XICS_IPI;
+ icp->pending_priority = icp->mfrr;
+ icp->xirr_owner = NULL;
+ qemu_irq_raise(icp->output);
+}
+
+void icp_resend(ICPState *icp)
+{
+ XICSFabric *xi = icp->xics;
+ XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
+
+ if (icp->mfrr < CPPR(icp)) {
+ icp_check_ipi(icp);
+ }
+
+ xic->ics_resend(xi);
+}
+
+void icp_set_cppr(ICPState *icp, uint8_t cppr)
+{
+ uint8_t old_cppr;
+ uint32_t old_xisr;
+
+ old_cppr = CPPR(icp);
+ icp->xirr = (icp->xirr & ~CPPR_MASK) | (cppr << 24);
+
+ if (cppr < old_cppr) {
+ if (XISR(icp) && (cppr <= icp->pending_priority)) {
+ old_xisr = XISR(icp);
+ icp->xirr &= ~XISR_MASK; /* Clear XISR */
+ icp->pending_priority = 0xff;
+ qemu_irq_lower(icp->output);
+ if (icp->xirr_owner) {
+ ics_reject(icp->xirr_owner, old_xisr);
+ icp->xirr_owner = NULL;
+ }
+ }
+ } else {
+ if (!XISR(icp)) {
+ icp_resend(icp);
+ }
+ }
+}
+
+void icp_set_mfrr(ICPState *icp, uint8_t mfrr)
+{
+ icp->mfrr = mfrr;
+ if (mfrr < CPPR(icp)) {
+ icp_check_ipi(icp);
+ }
+}
+
+uint32_t icp_accept(ICPState *icp)
+{
+ uint32_t xirr = icp->xirr;
+
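+    /*
+     * Accepting the interrupt raises the CPPR to the priority of the
+     * pending interrupt and clears the XISR field of the new XIRR.
+     */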
+ qemu_irq_lower(icp->output);
+ icp->xirr = icp->pending_priority << 24;
+ icp->pending_priority = 0xff;
+ icp->xirr_owner = NULL;
+
+ trace_xics_icp_accept(xirr, icp->xirr);
+
+ return xirr;
+}
+
+uint32_t icp_ipoll(ICPState *icp, uint32_t *mfrr)
+{
+ if (mfrr) {
+ *mfrr = icp->mfrr;
+ }
+ return icp->xirr;
+}
+
+void icp_eoi(ICPState *icp, uint32_t xirr)
+{
+ XICSFabric *xi = icp->xics;
+ XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
+ ICSState *ics;
+ uint32_t irq;
+
+ /* Send EOI -> ICS */
+ icp->xirr = (icp->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
+ trace_xics_icp_eoi(icp->cs->cpu_index, xirr, icp->xirr);
+ irq = xirr & XISR_MASK;
+
+ ics = xic->ics_get(xi, irq);
+ if (ics) {
+ ics_eoi(ics, irq);
+ }
+ if (!XISR(icp)) {
+ icp_resend(icp);
+ }
+}
+
+void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
+{
+ ICPState *icp = xics_icp_get(ics->xics, server);
+
+ trace_xics_icp_irq(server, nr, priority);
+
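+    /*
+     * Lower priority values are more favored. Reject the interrupt if
+     * it is not more favored than the current CPPR, or if an equally
+     * or more favored interrupt is already pending.
+     */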
+ if ((priority >= CPPR(icp))
+ || (XISR(icp) && (icp->pending_priority <= priority))) {
+ ics_reject(ics, nr);
+ } else {
+ if (XISR(icp) && icp->xirr_owner) {
+ ics_reject(icp->xirr_owner, XISR(icp));
+ icp->xirr_owner = NULL;
+ }
+ icp->xirr = (icp->xirr & ~XISR_MASK) | (nr & XISR_MASK);
+ icp->xirr_owner = ics;
+ icp->pending_priority = priority;
+ trace_xics_icp_raise(icp->xirr, icp->pending_priority);
+ qemu_irq_raise(icp->output);
+ }
+}
+
+static int icp_pre_save(void *opaque)
+{
+ ICPState *icp = opaque;
+
+ if (kvm_irqchip_in_kernel()) {
+ icp_get_kvm_state(icp);
+ }
+
+ return 0;
+}
+
+static int icp_post_load(void *opaque, int version_id)
+{
+ ICPState *icp = opaque;
+
+ if (kvm_irqchip_in_kernel()) {
+ Error *local_err = NULL;
+ int ret;
+
+ ret = icp_set_kvm_state(icp, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_icp_server = {
+ .name = "icp/server",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = icp_pre_save,
+ .post_load = icp_post_load,
+ .fields = (VMStateField[]) {
+ /* Sanity check */
+ VMSTATE_UINT32(xirr, ICPState),
+ VMSTATE_UINT8(pending_priority, ICPState),
+ VMSTATE_UINT8(mfrr, ICPState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+void icp_reset(ICPState *icp)
+{
+ icp->xirr = 0;
+ icp->pending_priority = 0xff;
+ icp->mfrr = 0xff;
+
+ if (kvm_irqchip_in_kernel()) {
+ Error *local_err = NULL;
+
+ icp_set_kvm_state(icp, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ }
+}
+
+static void icp_realize(DeviceState *dev, Error **errp)
+{
+ ICPState *icp = ICP(dev);
+ CPUPPCState *env;
+ Error *err = NULL;
+
+ assert(icp->xics);
+ assert(icp->cs);
+
+ env = &POWERPC_CPU(icp->cs)->env;
+ switch (PPC_INPUT(env)) {
+ case PPC_FLAGS_INPUT_POWER7:
+ icp->output = env->irq_inputs[POWER7_INPUT_INT];
+ break;
+ case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */
+ icp->output = env->irq_inputs[POWER9_INPUT_INT];
+ break;
+
+ case PPC_FLAGS_INPUT_970:
+ icp->output = env->irq_inputs[PPC970_INPUT_INT];
+ break;
+
+ default:
+ error_setg(errp, "XICS interrupt controller does not support this CPU bus model");
+ return;
+ }
+
+ /* Connect the presenter to the VCPU (required for CPU hotplug) */
+ if (kvm_irqchip_in_kernel()) {
+ icp_kvm_realize(dev, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ }
+
+ vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
+}
+
+static void icp_unrealize(DeviceState *dev)
+{
+ ICPState *icp = ICP(dev);
+
+ vmstate_unregister(NULL, &vmstate_icp_server, icp);
+}
+
+static Property icp_properties[] = {
+ DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC,
+ XICSFabric *),
+ DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void icp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = icp_realize;
+ dc->unrealize = icp_unrealize;
+ device_class_set_props(dc, icp_properties);
+ /*
+ * Reason: part of XICS interrupt controller, needs to be wired up
+ * by icp_create().
+ */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo icp_info = {
+ .name = TYPE_ICP,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(ICPState),
+ .class_init = icp_class_init,
+ .class_size = sizeof(ICPStateClass),
+};
+
+Object *icp_create(Object *cpu, const char *type, XICSFabric *xi, Error **errp)
+{
+ Object *obj;
+
+ obj = object_new(type);
+ object_property_add_child(cpu, type, obj);
+ object_unref(obj);
+ object_property_set_link(obj, ICP_PROP_XICS, OBJECT(xi), &error_abort);
+ object_property_set_link(obj, ICP_PROP_CPU, cpu, &error_abort);
+ if (!qdev_realize(DEVICE(obj), NULL, errp)) {
+ object_unparent(obj);
+ obj = NULL;
+ }
+
+ return obj;
+}
+
+void icp_destroy(ICPState *icp)
+{
+ Object *obj = OBJECT(icp);
+
+ object_unparent(obj);
+}
+
+/*
+ * ICS: Source layer
+ */
+static void ics_resend_msi(ICSState *ics, int srcno)
+{
+ ICSIRQState *irq = ics->irqs + srcno;
+
+ /* FIXME: filter by server#? */
+ if (irq->status & XICS_STATUS_REJECTED) {
+ irq->status &= ~XICS_STATUS_REJECTED;
+ if (irq->priority != 0xff) {
+ icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
+ }
+ }
+}
+
+static void ics_resend_lsi(ICSState *ics, int srcno)
+{
+ ICSIRQState *irq = ics->irqs + srcno;
+
+ if ((irq->priority != 0xff)
+ && (irq->status & XICS_STATUS_ASSERTED)
+ && !(irq->status & XICS_STATUS_SENT)) {
+ irq->status |= XICS_STATUS_SENT;
+ icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
+ }
+}
+
+static void ics_set_irq_msi(ICSState *ics, int srcno, int val)
+{
+ ICSIRQState *irq = ics->irqs + srcno;
+
+ trace_xics_ics_set_irq_msi(srcno, srcno + ics->offset);
+
+ if (val) {
+ if (irq->priority == 0xff) {
+ irq->status |= XICS_STATUS_MASKED_PENDING;
+ trace_xics_masked_pending();
+ } else {
+ icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
+ }
+ }
+}
+
+static void ics_set_irq_lsi(ICSState *ics, int srcno, int val)
+{
+ ICSIRQState *irq = ics->irqs + srcno;
+
+ trace_xics_ics_set_irq_lsi(srcno, srcno + ics->offset);
+ if (val) {
+ irq->status |= XICS_STATUS_ASSERTED;
+ } else {
+ irq->status &= ~XICS_STATUS_ASSERTED;
+ }
+ ics_resend_lsi(ics, srcno);
+}
+
+void ics_set_irq(void *opaque, int srcno, int val)
+{
+ ICSState *ics = (ICSState *)opaque;
+
+ if (kvm_irqchip_in_kernel()) {
+ ics_kvm_set_irq(ics, srcno, val);
+ return;
+ }
+
+ if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
+ ics_set_irq_lsi(ics, srcno, val);
+ } else {
+ ics_set_irq_msi(ics, srcno, val);
+ }
+}
+
+static void ics_write_xive_msi(ICSState *ics, int srcno)
+{
+ ICSIRQState *irq = ics->irqs + srcno;
+
+ if (!(irq->status & XICS_STATUS_MASKED_PENDING)
+ || (irq->priority == 0xff)) {
+ return;
+ }
+
+ irq->status &= ~XICS_STATUS_MASKED_PENDING;
+ icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
+}
+
+static void ics_write_xive_lsi(ICSState *ics, int srcno)
+{
+ ics_resend_lsi(ics, srcno);
+}
+
+void ics_write_xive(ICSState *ics, int srcno, int server,
+ uint8_t priority, uint8_t saved_priority)
+{
+ ICSIRQState *irq = ics->irqs + srcno;
+
+ irq->server = server;
+ irq->priority = priority;
+ irq->saved_priority = saved_priority;
+
+ trace_xics_ics_write_xive(ics->offset + srcno, srcno, server, priority);
+
+ if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
+ ics_write_xive_lsi(ics, srcno);
+ } else {
+ ics_write_xive_msi(ics, srcno);
+ }
+}
+
+static void ics_reject(ICSState *ics, uint32_t nr)
+{
+ ICSStateClass *isc = ICS_GET_CLASS(ics);
+ ICSIRQState *irq = ics->irqs + nr - ics->offset;
+
+ if (isc->reject) {
+ isc->reject(ics, nr);
+ return;
+ }
+
+ trace_xics_ics_reject(nr, nr - ics->offset);
+ if (irq->flags & XICS_FLAGS_IRQ_MSI) {
+ irq->status |= XICS_STATUS_REJECTED;
+ } else if (irq->flags & XICS_FLAGS_IRQ_LSI) {
+ irq->status &= ~XICS_STATUS_SENT;
+ }
+}
+
+void ics_resend(ICSState *ics)
+{
+ ICSStateClass *isc = ICS_GET_CLASS(ics);
+ int i;
+
+ if (isc->resend) {
+ isc->resend(ics);
+ return;
+ }
+
+ for (i = 0; i < ics->nr_irqs; i++) {
+ /* FIXME: filter by server#? */
+ if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
+ ics_resend_lsi(ics, i);
+ } else {
+ ics_resend_msi(ics, i);
+ }
+ }
+}
+
+static void ics_eoi(ICSState *ics, uint32_t nr)
+{
+ int srcno = nr - ics->offset;
+ ICSIRQState *irq = ics->irqs + srcno;
+
+ trace_xics_ics_eoi(nr);
+
+ if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
+ irq->status &= ~XICS_STATUS_SENT;
+ }
+}
+
+static void ics_reset_irq(ICSIRQState *irq)
+{
+ irq->priority = 0xff;
+ irq->saved_priority = 0xff;
+}
+
+static void ics_reset(DeviceState *dev)
+{
+ ICSState *ics = ICS(dev);
+ int i;
+ uint8_t flags[ics->nr_irqs];
+
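+    /* Preserve the IRQ type flags (LSI/MSI) across the reset of the array */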
+ for (i = 0; i < ics->nr_irqs; i++) {
+ flags[i] = ics->irqs[i].flags;
+ }
+
+ memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
+
+ for (i = 0; i < ics->nr_irqs; i++) {
+ ics_reset_irq(ics->irqs + i);
+ ics->irqs[i].flags = flags[i];
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ Error *local_err = NULL;
+
+ ics_set_kvm_state(ICS(dev), &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ }
+}
+
+static void ics_reset_handler(void *dev)
+{
+ ics_reset(dev);
+}
+
+static void ics_realize(DeviceState *dev, Error **errp)
+{
+ ICSState *ics = ICS(dev);
+
+ assert(ics->xics);
+
+ if (!ics->nr_irqs) {
+ error_setg(errp, "Number of interrupts needs to be greater 0");
+ return;
+ }
+ ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
+
+ qemu_register_reset(ics_reset_handler, ics);
+}
+
+static void ics_instance_init(Object *obj)
+{
+ ICSState *ics = ICS(obj);
+
+ ics->offset = XICS_IRQ_BASE;
+}
+
+static int ics_pre_save(void *opaque)
+{
+ ICSState *ics = opaque;
+
+ if (kvm_irqchip_in_kernel()) {
+ ics_get_kvm_state(ics);
+ }
+
+ return 0;
+}
+
+static int ics_post_load(void *opaque, int version_id)
+{
+ ICSState *ics = opaque;
+
+ if (kvm_irqchip_in_kernel()) {
+ Error *local_err = NULL;
+ int ret;
+
+ ret = ics_set_kvm_state(ics, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_ics_irq = {
+ .name = "ics/irq",
+ .version_id = 2,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(server, ICSIRQState),
+ VMSTATE_UINT8(priority, ICSIRQState),
+ VMSTATE_UINT8(saved_priority, ICSIRQState),
+ VMSTATE_UINT8(status, ICSIRQState),
+ VMSTATE_UINT8(flags, ICSIRQState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_ics = {
+ .name = "ics",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = ics_pre_save,
+ .post_load = ics_post_load,
+ .fields = (VMStateField[]) {
+ /* Sanity check */
+ VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL),
+
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
+ vmstate_ics_irq,
+ ICSIRQState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static Property ics_properties[] = {
+ DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
+ DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC,
+ XICSFabric *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ics_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = ics_realize;
+ device_class_set_props(dc, ics_properties);
+ dc->reset = ics_reset;
+ dc->vmsd = &vmstate_ics;
+ /*
+ * Reason: part of XICS interrupt controller, needs to be wired up,
+ * e.g. by spapr_irq_init().
+ */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo ics_info = {
+ .name = TYPE_ICS,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(ICSState),
+ .instance_init = ics_instance_init,
+ .class_init = ics_class_init,
+ .class_size = sizeof(ICSStateClass),
+};
+
+static const TypeInfo xics_fabric_info = {
+ .name = TYPE_XICS_FABRIC,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(XICSFabricClass),
+};
+
+/*
+ * Exported functions
+ */
+ICPState *xics_icp_get(XICSFabric *xi, int server)
+{
+ XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
+
+ return xic->icp_get(xi, server);
+}
+
+void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
+{
+ assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));
+
+ ics->irqs[srcno].flags |=
+ lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
+
+ if (kvm_irqchip_in_kernel()) {
+ Error *local_err = NULL;
+
+ ics_reset_irq(ics->irqs + srcno);
+ ics_set_kvm_state_one(ics, srcno, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ }
+}
+
+static void xics_register_types(void)
+{
+ type_register_static(&ics_info);
+ type_register_static(&icp_info);
+ type_register_static(&xics_fabric_info);
+}
+
+type_init(xics_register_types)
diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
new file mode 100644
index 000000000..f5bfc501b
--- /dev/null
+++ b/hw/intc/xics_kvm.c
@@ -0,0 +1,509 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics, in-kernel emulation
+ *
+ * Copyright (c) 2013 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "trace.h"
+#include "sysemu/kvm.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/xics.h"
+#include "hw/ppc/xics_spapr.h"
+#include "kvm_ppc.h"
+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+
+#include <sys/ioctl.h>
+
+static int kernel_xics_fd = -1;
+
+typedef struct KVMEnabledICP {
+ unsigned long vcpu_id;
+ QLIST_ENTRY(KVMEnabledICP) node;
+} KVMEnabledICP;
+
+static QLIST_HEAD(, KVMEnabledICP)
+ kvm_enabled_icps = QLIST_HEAD_INITIALIZER(&kvm_enabled_icps);
+
+static void kvm_disable_icps(void)
+{
+ KVMEnabledICP *enabled_icp, *next;
+
+ QLIST_FOREACH_SAFE(enabled_icp, &kvm_enabled_icps, node, next) {
+ QLIST_REMOVE(enabled_icp, node);
+ g_free(enabled_icp);
+ }
+}
+
+/*
+ * ICP-KVM
+ */
+void icp_get_kvm_state(ICPState *icp)
+{
+ uint64_t state;
+ int ret;
+
+ /* The KVM XICS device is not in use */
+ if (kernel_xics_fd == -1) {
+ return;
+ }
+
+ /* ICP for this CPU thread is not in use, exiting */
+ if (!icp->cs) {
+ return;
+ }
+
+ ret = kvm_get_one_reg(icp->cs, KVM_REG_PPC_ICP_STATE, &state);
+ if (ret != 0) {
+ error_report("Unable to retrieve KVM interrupt controller state"
+ " for CPU %ld: %s", kvm_arch_vcpu_id(icp->cs), strerror(errno));
+ exit(1);
+ }
+
+ icp->xirr = state >> KVM_REG_PPC_ICP_XISR_SHIFT;
+ icp->mfrr = (state >> KVM_REG_PPC_ICP_MFRR_SHIFT)
+ & KVM_REG_PPC_ICP_MFRR_MASK;
+ icp->pending_priority = (state >> KVM_REG_PPC_ICP_PPRI_SHIFT)
+ & KVM_REG_PPC_ICP_PPRI_MASK;
+}
+
+static void do_icp_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
+{
+ icp_get_kvm_state(arg.host_ptr);
+}
+
+void icp_synchronize_state(ICPState *icp)
+{
+ if (icp->cs) {
+ run_on_cpu(icp->cs, do_icp_synchronize_state, RUN_ON_CPU_HOST_PTR(icp));
+ }
+}
+
+int icp_set_kvm_state(ICPState *icp, Error **errp)
+{
+ uint64_t state;
+ int ret;
+
+ /* The KVM XICS device is not in use */
+ if (kernel_xics_fd == -1) {
+ return 0;
+ }
+
+ /* ICP for this CPU thread is not in use, exiting */
+ if (!icp->cs) {
+ return 0;
+ }
+
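+    /* Repack the software ICP state into the 64-bit KVM_REG_PPC_ICP_STATE value */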
+ state = ((uint64_t)icp->xirr << KVM_REG_PPC_ICP_XISR_SHIFT)
+ | ((uint64_t)icp->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT)
+ | ((uint64_t)icp->pending_priority << KVM_REG_PPC_ICP_PPRI_SHIFT);
+
+ ret = kvm_set_one_reg(icp->cs, KVM_REG_PPC_ICP_STATE, &state);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "Unable to restore KVM interrupt controller state (0x%"
+ PRIx64 ") for CPU %ld", state,
+ kvm_arch_vcpu_id(icp->cs));
+ return ret;
+ }
+
+ return 0;
+}
+
+void icp_kvm_realize(DeviceState *dev, Error **errp)
+{
+ ICPState *icp = ICP(dev);
+ CPUState *cs;
+ KVMEnabledICP *enabled_icp;
+ unsigned long vcpu_id;
+ int ret;
+
+ /* The KVM XICS device is not in use */
+ if (kernel_xics_fd == -1) {
+ return;
+ }
+
+ cs = icp->cs;
+ vcpu_id = kvm_arch_vcpu_id(cs);
+
+ /*
+     * If we are reusing a parked vCPU fd corresponding to a CPU that
+     * was hot-removed earlier, we don't have to enable the
+     * KVM_CAP_IRQ_XICS capability again.
+ */
+ QLIST_FOREACH(enabled_icp, &kvm_enabled_icps, node) {
+ if (enabled_icp->vcpu_id == vcpu_id) {
+ return;
+ }
+ }
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_XICS, 0, kernel_xics_fd, vcpu_id);
+ if (ret < 0) {
+ Error *local_err = NULL;
+
+ error_setg(&local_err, "Unable to connect CPU%ld to kernel XICS: %s",
+ vcpu_id, strerror(errno));
+ if (errno == ENOSPC) {
+ error_append_hint(&local_err, "Try -smp maxcpus=N with N < %u\n",
+ MACHINE(qdev_get_machine())->smp.max_cpus);
+ }
+ error_propagate(errp, local_err);
+ return;
+ }
+ enabled_icp = g_malloc(sizeof(*enabled_icp));
+ enabled_icp->vcpu_id = vcpu_id;
+ QLIST_INSERT_HEAD(&kvm_enabled_icps, enabled_icp, node);
+}
+
+/*
+ * ICS-KVM
+ */
+void ics_get_kvm_state(ICSState *ics)
+{
+ uint64_t state;
+ int i;
+
+ /* The KVM XICS device is not in use */
+ if (kernel_xics_fd == -1) {
+ return;
+ }
+
+ for (i = 0; i < ics->nr_irqs; i++) {
+ ICSIRQState *irq = &ics->irqs[i];
+
+ if (ics_irq_free(ics, i)) {
+ continue;
+ }
+
+ kvm_device_access(kernel_xics_fd, KVM_DEV_XICS_GRP_SOURCES,
+ i + ics->offset, &state, false, &error_fatal);
+
+ irq->server = state & KVM_XICS_DESTINATION_MASK;
+ irq->saved_priority = (state >> KVM_XICS_PRIORITY_SHIFT)
+ & KVM_XICS_PRIORITY_MASK;
+ /*
+ * To be consistent with the software emulation in xics.c, we
+ * split out the masked state + priority that we get from the
+ * kernel into 'current priority' (0xff if masked) and
+ * 'saved priority' (if masked, this is the priority the
+ * interrupt had before it was masked). Masking and unmasking
+ * are done with the ibm,int-off and ibm,int-on RTAS calls.
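+         * For example, an interrupt configured at priority 5 and then
+         * masked with ibm,int-off is presented here with priority = 0xff
+         * and saved_priority = 5.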
+ */
+ if (state & KVM_XICS_MASKED) {
+ irq->priority = 0xff;
+ } else {
+ irq->priority = irq->saved_priority;
+ }
+
+ irq->status = 0;
+ if (state & KVM_XICS_PENDING) {
+ if (state & KVM_XICS_LEVEL_SENSITIVE) {
+ irq->status |= XICS_STATUS_ASSERTED;
+ } else {
+ /*
+ * A pending edge-triggered interrupt (or MSI)
+ * must have been rejected previously when we
+ * first detected it and tried to deliver it,
+ * so mark it as pending and previously rejected
+ * for consistency with how xics.c works.
+ */
+ irq->status |= XICS_STATUS_MASKED_PENDING
+ | XICS_STATUS_REJECTED;
+ }
+ }
+ if (state & KVM_XICS_PRESENTED) {
+ irq->status |= XICS_STATUS_PRESENTED;
+ }
+ if (state & KVM_XICS_QUEUED) {
+ irq->status |= XICS_STATUS_QUEUED;
+ }
+ }
+}
+
+void ics_synchronize_state(ICSState *ics)
+{
+ ics_get_kvm_state(ics);
+}
+
+int ics_set_kvm_state_one(ICSState *ics, int srcno, Error **errp)
+{
+ uint64_t state;
+ ICSIRQState *irq = &ics->irqs[srcno];
+ int ret;
+
+ /* The KVM XICS device is not in use */
+ if (kernel_xics_fd == -1) {
+ return 0;
+ }
+
+ state = irq->server;
+ state |= (uint64_t)(irq->saved_priority & KVM_XICS_PRIORITY_MASK)
+ << KVM_XICS_PRIORITY_SHIFT;
+ if (irq->priority != irq->saved_priority) {
+ assert(irq->priority == 0xff);
+ }
+
+ if (irq->priority == 0xff) {
+ state |= KVM_XICS_MASKED;
+ }
+
+ if (irq->flags & XICS_FLAGS_IRQ_LSI) {
+ state |= KVM_XICS_LEVEL_SENSITIVE;
+ if (irq->status & XICS_STATUS_ASSERTED) {
+ state |= KVM_XICS_PENDING;
+ }
+ } else {
+ if (irq->status & XICS_STATUS_MASKED_PENDING) {
+ state |= KVM_XICS_PENDING;
+ }
+ }
+ if (irq->status & XICS_STATUS_PRESENTED) {
+ state |= KVM_XICS_PRESENTED;
+ }
+ if (irq->status & XICS_STATUS_QUEUED) {
+ state |= KVM_XICS_QUEUED;
+ }
+
+ ret = kvm_device_access(kernel_xics_fd, KVM_DEV_XICS_GRP_SOURCES,
+ srcno + ics->offset, &state, true, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+int ics_set_kvm_state(ICSState *ics, Error **errp)
+{
+ int i;
+
+ /* The KVM XICS device is not in use */
+ if (kernel_xics_fd == -1) {
+ return 0;
+ }
+
+ for (i = 0; i < ics->nr_irqs; i++) {
+ int ret;
+
+ if (ics_irq_free(ics, i)) {
+ continue;
+ }
+
+ ret = ics_set_kvm_state_one(ics, i, errp);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ics_kvm_set_irq(ICSState *ics, int srcno, int val)
+{
+ struct kvm_irq_level args;
+ int rc;
+
+ /* The KVM XICS device should be in use */
+ assert(kernel_xics_fd != -1);
+
+ args.irq = srcno + ics->offset;
+ if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MSI) {
+ if (!val) {
+ return;
+ }
+ args.level = KVM_INTERRUPT_SET;
+ } else {
+ args.level = val ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
+ }
+ rc = kvm_vm_ioctl(kvm_state, KVM_IRQ_LINE, &args);
+ if (rc < 0) {
+ perror("kvm_irq_line");
+ }
+}
+
+int xics_kvm_connect(SpaprInterruptController *intc, uint32_t nr_servers,
+ Error **errp)
+{
+ ICSState *ics = ICS_SPAPR(intc);
+ int rc;
+ CPUState *cs;
+ Error *local_err = NULL;
+
+ /*
+     * The KVM XICS device is already in use. This is the case when
+ * rebooting under the XICS-only interrupt mode.
+ */
+ if (kernel_xics_fd != -1) {
+ return 0;
+ }
+
+ if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) {
+ error_setg(errp,
+ "KVM and IRQ_XICS capability must be present for in-kernel XICS");
+ return -1;
+ }
+
+ rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_SET_XIVE, "ibm,set-xive");
+ if (rc < 0) {
+ error_setg_errno(&local_err, -rc,
+ "kvmppc_define_rtas_kernel_token: ibm,set-xive");
+ goto fail;
+ }
+
+ rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_GET_XIVE, "ibm,get-xive");
+ if (rc < 0) {
+ error_setg_errno(&local_err, -rc,
+ "kvmppc_define_rtas_kernel_token: ibm,get-xive");
+ goto fail;
+ }
+
+ rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_ON, "ibm,int-on");
+ if (rc < 0) {
+ error_setg_errno(&local_err, -rc,
+ "kvmppc_define_rtas_kernel_token: ibm,int-on");
+ goto fail;
+ }
+
+ rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_OFF, "ibm,int-off");
+ if (rc < 0) {
+ error_setg_errno(&local_err, -rc,
+ "kvmppc_define_rtas_kernel_token: ibm,int-off");
+ goto fail;
+ }
+
+ /* Create the KVM XICS device */
+ rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false);
+ if (rc < 0) {
+ error_setg_errno(&local_err, -rc, "Error on KVM_CREATE_DEVICE for XICS");
+ goto fail;
+ }
+
+ /* Tell KVM about the # of VCPUs we may have (POWER9 and newer only) */
+ if (kvm_device_check_attr(rc, KVM_DEV_XICS_GRP_CTRL,
+ KVM_DEV_XICS_NR_SERVERS)) {
+ if (kvm_device_access(rc, KVM_DEV_XICS_GRP_CTRL,
+ KVM_DEV_XICS_NR_SERVERS, &nr_servers, true,
+ &local_err)) {
+ goto fail;
+ }
+ }
+
+ kernel_xics_fd = rc;
+ kvm_kernel_irqchip = true;
+ kvm_msi_via_irqfd_allowed = true;
+ kvm_gsi_direct_mapping = true;
+
+ /* Create the presenters */
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ icp_kvm_realize(DEVICE(spapr_cpu_state(cpu)->icp), &local_err);
+ if (local_err) {
+ goto fail;
+ }
+ }
+
+ /* Update the KVM sources */
+ ics_set_kvm_state(ics, &local_err);
+ if (local_err) {
+ goto fail;
+ }
+
+ /* Connect the presenters to the initial VCPUs of the machine */
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ icp_set_kvm_state(spapr_cpu_state(cpu)->icp, &local_err);
+ if (local_err) {
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ error_propagate(errp, local_err);
+ xics_kvm_disconnect(intc);
+ return -1;
+}
+
+void xics_kvm_disconnect(SpaprInterruptController *intc)
+{
+ /*
+     * Only on POWER9, using the XICS-on-XIVE KVM device:
+ *
+ * When the KVM device fd is closed, the device is destroyed and
+ * removed from the list of devices of the VM. The VCPU presenters
+ * are also detached from the device.
+ */
+ if (kernel_xics_fd != -1) {
+ close(kernel_xics_fd);
+ kernel_xics_fd = -1;
+ }
+
+ kvmppc_define_rtas_kernel_token(0, "ibm,set-xive");
+ kvmppc_define_rtas_kernel_token(0, "ibm,get-xive");
+ kvmppc_define_rtas_kernel_token(0, "ibm,int-on");
+ kvmppc_define_rtas_kernel_token(0, "ibm,int-off");
+
+ kvm_kernel_irqchip = false;
+ kvm_msi_via_irqfd_allowed = false;
+ kvm_gsi_direct_mapping = false;
+
+ /* Clear the presenter from the VCPUs */
+ kvm_disable_icps();
+}
+
+/*
+ * This is a heuristic to detect older KVMs on POWER9 hosts that don't
+ * support destruction of a KVM XICS device while the VM is running.
+ * Required to start a spapr machine with ic-mode=dual,kernel-irqchip=on.
+ */
+bool xics_kvm_has_broken_disconnect(void)
+{
+ int rc;
+
+ rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false);
+ if (rc < 0) {
+ /*
+ * The error is ignored on purpose. The KVM XICS setup code
+ * will catch it again anyway. The goal here is to see if
+ * close() actually destroys the device or not.
+ */
+ return false;
+ }
+
+ close(rc);
+
+ rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false);
+ if (rc >= 0) {
+ close(rc);
+ return false;
+ }
+
+ return errno == EEXIST;
+}
diff --git a/hw/intc/xics_pnv.c b/hw/intc/xics_pnv.c
new file mode 100644
index 000000000..753c067f1
--- /dev/null
+++ b/hw/intc/xics_pnv.c
@@ -0,0 +1,202 @@
+/*
+ * QEMU PowerPC PowerNV Interrupt Control Presenter (ICP) model
+ *
+ * Copyright (c) 2017, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/ppc/xics.h"
+
+#define ICP_XIRR_POLL 0 /* 1 byte (CPPR) or 4 bytes */
+#define ICP_XIRR 4 /* 1 byte (CPPR) or 4 bytes */
+#define ICP_MFRR 12 /* 1 byte access only */
+
+#define ICP_LINKA 16 /* unused */
+#define ICP_LINKB 20 /* unused */
+#define ICP_LINKC 24 /* unused */
+
+static uint64_t pnv_icp_read(void *opaque, hwaddr addr, unsigned width)
+{
+ ICPState *icp = ICP(opaque);
+ PnvICPState *picp = PNV_ICP(opaque);
+ bool byte0 = (width == 1 && (addr & 0x3) == 0);
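+    /* A 1-byte access to byte 0 reaches only the CPPR, the MSB of the BE XIRR */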
+ uint64_t val = 0xffffffff;
+
+ switch (addr & 0xffc) {
+ case ICP_XIRR_POLL:
+ val = icp_ipoll(icp, NULL);
+ if (byte0) {
+ val >>= 24;
+ } else if (width != 4) {
+ goto bad_access;
+ }
+ break;
+ case ICP_XIRR:
+ if (byte0) {
+ val = icp_ipoll(icp, NULL) >> 24;
+ } else if (width == 4) {
+ val = icp_accept(icp);
+ } else {
+ goto bad_access;
+ }
+ break;
+ case ICP_MFRR:
+ if (byte0) {
+ val = icp->mfrr;
+ } else {
+ goto bad_access;
+ }
+ break;
+ case ICP_LINKA:
+ if (width == 4) {
+ val = picp->links[0];
+ } else {
+ goto bad_access;
+ }
+ break;
+ case ICP_LINKB:
+ if (width == 4) {
+ val = picp->links[1];
+ } else {
+ goto bad_access;
+ }
+ break;
+ case ICP_LINKC:
+ if (width == 4) {
+ val = picp->links[2];
+ } else {
+ goto bad_access;
+ }
+ break;
+ default:
+bad_access:
+ qemu_log_mask(LOG_GUEST_ERROR, "XICS: Bad ICP access 0x%"
+ HWADDR_PRIx"/%d\n", addr, width);
+ }
+
+ return val;
+}
+
+static void pnv_icp_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ ICPState *icp = ICP(opaque);
+ PnvICPState *picp = PNV_ICP(opaque);
+ bool byte0 = (width == 1 && (addr & 0x3) == 0);
+
+ switch (addr & 0xffc) {
+ case ICP_XIRR:
+ if (byte0) {
+ icp_set_cppr(icp, val);
+ } else if (width == 4) {
+ icp_eoi(icp, val);
+ } else {
+ goto bad_access;
+ }
+ break;
+ case ICP_MFRR:
+ if (byte0) {
+ icp_set_mfrr(icp, val);
+ } else {
+ goto bad_access;
+ }
+ break;
+ case ICP_LINKA:
+ if (width == 4) {
+ picp->links[0] = val;
+ } else {
+ goto bad_access;
+ }
+ break;
+ case ICP_LINKB:
+ if (width == 4) {
+ picp->links[1] = val;
+ } else {
+ goto bad_access;
+ }
+ break;
+ case ICP_LINKC:
+ if (width == 4) {
+ picp->links[2] = val;
+ } else {
+ goto bad_access;
+ }
+ break;
+ default:
+bad_access:
+ qemu_log_mask(LOG_GUEST_ERROR, "XICS: Bad ICP access 0x%"
+ HWADDR_PRIx"/%d\n", addr, width);
+ }
+}
+
+static const MemoryRegionOps pnv_icp_ops = {
+ .read = pnv_icp_read,
+ .write = pnv_icp_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+static void pnv_icp_realize(DeviceState *dev, Error **errp)
+{
+ ICPState *icp = ICP(dev);
+ PnvICPState *pnv_icp = PNV_ICP(icp);
+ ICPStateClass *icpc = ICP_GET_CLASS(icp);
+ Error *local_err = NULL;
+
+ icpc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ memory_region_init_io(&pnv_icp->mmio, OBJECT(icp), &pnv_icp_ops,
+ icp, "icp-thread", 0x1000);
+}
+
+static void pnv_icp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ICPStateClass *icpc = ICP_CLASS(klass);
+
+ device_class_set_parent_realize(dc, pnv_icp_realize,
+ &icpc->parent_realize);
+ dc->desc = "PowerNV ICP";
+}
+
+static const TypeInfo pnv_icp_info = {
+ .name = TYPE_PNV_ICP,
+ .parent = TYPE_ICP,
+ .instance_size = sizeof(PnvICPState),
+ .class_init = pnv_icp_class_init,
+ .class_size = sizeof(ICPStateClass),
+};
+
+static void pnv_icp_register_types(void)
+{
+ type_register_static(&pnv_icp_info);
+}
+
+type_init(pnv_icp_register_types)
diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
new file mode 100644
index 000000000..37b2d9997
--- /dev/null
+++ b/hw/intc/xics_spapr.c
@@ -0,0 +1,476 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
+ *
+ * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "trace.h"
+#include "qemu/timer.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/xics.h"
+#include "hw/ppc/xics_spapr.h"
+#include "hw/ppc/fdt.h"
+#include "qapi/visitor.h"
+
+/*
+ * Guest interfaces
+ */
+
+static bool check_emulated_xics(SpaprMachineState *spapr, const char *func)
+{
+ if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ||
+ kvm_irqchip_in_kernel()) {
+ error_report("pseries: %s must only be called for emulated XICS",
+ func);
+ return false;
+ }
+
+ return true;
+}
+
+#define CHECK_EMULATED_XICS_HCALL(spapr) \
+ do { \
+ if (!check_emulated_xics((spapr), __func__)) { \
+ return H_HARDWARE; \
+ } \
+ } while (0)
+
+static target_ulong h_cppr(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong cppr = args[0];
+
+ CHECK_EMULATED_XICS_HCALL(spapr);
+
+ icp_set_cppr(spapr_cpu_state(cpu)->icp, cppr);
+ return H_SUCCESS;
+}
+
+static target_ulong h_ipi(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong mfrr = args[1];
+ ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]);
+
+ CHECK_EMULATED_XICS_HCALL(spapr);
+
+ if (!icp) {
+ return H_PARAMETER;
+ }
+
+ icp_set_mfrr(icp, mfrr);
+ return H_SUCCESS;
+}
+
+static target_ulong h_xirr(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp);
+
+ CHECK_EMULATED_XICS_HCALL(spapr);
+
+ args[0] = xirr;
+ return H_SUCCESS;
+}
+
+static target_ulong h_xirr_x(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp);
+
+ CHECK_EMULATED_XICS_HCALL(spapr);
+
+ args[0] = xirr;
+ args[1] = cpu_get_host_ticks();
+ return H_SUCCESS;
+}
+
+static target_ulong h_eoi(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong xirr = args[0];
+
+ CHECK_EMULATED_XICS_HCALL(spapr);
+
+ icp_eoi(spapr_cpu_state(cpu)->icp, xirr);
+ return H_SUCCESS;
+}
+
+static target_ulong h_ipoll(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]);
+ uint32_t mfrr;
+ uint32_t xirr;
+
+ CHECK_EMULATED_XICS_HCALL(spapr);
+
+ if (!icp) {
+ return H_PARAMETER;
+ }
+
+ xirr = icp_ipoll(icp, &mfrr);
+
+ args[0] = xirr;
+ args[1] = mfrr;
+
+ return H_SUCCESS;
+}
+
+#define CHECK_EMULATED_XICS_RTAS(spapr, rets) \
+ do { \
+ if (!check_emulated_xics((spapr), __func__)) { \
+ rtas_st((rets), 0, RTAS_OUT_HW_ERROR); \
+ return; \
+ } \
+ } while (0)
+
+static void rtas_set_xive(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ ICSState *ics = spapr->ics;
+ uint32_t nr, srcno, server, priority;
+
+ CHECK_EMULATED_XICS_RTAS(spapr, rets);
+
+ if ((nargs != 3) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ if (!ics) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ nr = rtas_ld(args, 0);
+ server = rtas_ld(args, 1);
+ priority = rtas_ld(args, 2);
+
+ if (!ics_valid_irq(ics, nr) || !xics_icp_get(XICS_FABRIC(spapr), server)
+ || (priority > 0xff)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ srcno = nr - ics->offset;
+ ics_write_xive(ics, srcno, server, priority, priority);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_get_xive(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ ICSState *ics = spapr->ics;
+ uint32_t nr, srcno;
+
+ CHECK_EMULATED_XICS_RTAS(spapr, rets);
+
+ if ((nargs != 1) || (nret != 3)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ if (!ics) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ nr = rtas_ld(args, 0);
+
+ if (!ics_valid_irq(ics, nr)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ srcno = nr - ics->offset;
+ rtas_st(rets, 1, ics->irqs[srcno].server);
+ rtas_st(rets, 2, ics->irqs[srcno].priority);
+}
+
+static void rtas_int_off(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ ICSState *ics = spapr->ics;
+ uint32_t nr, srcno;
+
+ CHECK_EMULATED_XICS_RTAS(spapr, rets);
+
+ if ((nargs != 1) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ if (!ics) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ nr = rtas_ld(args, 0);
+
+ if (!ics_valid_irq(ics, nr)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ srcno = nr - ics->offset;
+ ics_write_xive(ics, srcno, ics->irqs[srcno].server, 0xff,
+ ics->irqs[srcno].priority);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_int_on(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ ICSState *ics = spapr->ics;
+ uint32_t nr, srcno;
+
+ CHECK_EMULATED_XICS_RTAS(spapr, rets);
+
+ if ((nargs != 1) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ if (!ics) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ nr = rtas_ld(args, 0);
+
+ if (!ics_valid_irq(ics, nr)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ srcno = nr - ics->offset;
+ ics_write_xive(ics, srcno, ics->irqs[srcno].server,
+ ics->irqs[srcno].saved_priority,
+ ics->irqs[srcno].saved_priority);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void ics_spapr_realize(DeviceState *dev, Error **errp)
+{
+ ICSState *ics = ICS_SPAPR(dev);
+ ICSStateClass *icsc = ICS_GET_CLASS(ics);
+ Error *local_err = NULL;
+
+ icsc->parent_realize(dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
+ spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
+ spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
+ spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);
+
+ spapr_register_hypercall(H_CPPR, h_cppr);
+ spapr_register_hypercall(H_IPI, h_ipi);
+ spapr_register_hypercall(H_XIRR, h_xirr);
+ spapr_register_hypercall(H_XIRR_X, h_xirr_x);
+ spapr_register_hypercall(H_EOI, h_eoi);
+ spapr_register_hypercall(H_IPOLL, h_ipoll);
+}
+
+static void xics_spapr_dt(SpaprInterruptController *intc, uint32_t nr_servers,
+ void *fdt, uint32_t phandle)
+{
+ uint32_t interrupt_server_ranges_prop[] = {
+ 0, cpu_to_be32(nr_servers),
+ };
+ int node;
+
+ _FDT(node = fdt_add_subnode(fdt, 0, "interrupt-controller"));
+
+ _FDT(fdt_setprop_string(fdt, node, "device_type",
+ "PowerPC-External-Interrupt-Presentation"));
+ _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,ppc-xicp"));
+ _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
+ _FDT(fdt_setprop(fdt, node, "ibm,interrupt-server-ranges",
+ interrupt_server_ranges_prop,
+ sizeof(interrupt_server_ranges_prop)));
+ _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
+ _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
+ _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
+}
+
+static int xics_spapr_cpu_intc_create(SpaprInterruptController *intc,
+ PowerPCCPU *cpu, Error **errp)
+{
+ ICSState *ics = ICS_SPAPR(intc);
+ Object *obj;
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+
+ obj = icp_create(OBJECT(cpu), TYPE_ICP, ics->xics, errp);
+ if (!obj) {
+ return -1;
+ }
+
+ spapr_cpu->icp = ICP(obj);
+ return 0;
+}
+
+static void xics_spapr_cpu_intc_reset(SpaprInterruptController *intc,
+ PowerPCCPU *cpu)
+{
+ icp_reset(spapr_cpu_state(cpu)->icp);
+}
+
+static void xics_spapr_cpu_intc_destroy(SpaprInterruptController *intc,
+ PowerPCCPU *cpu)
+{
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+
+ icp_destroy(spapr_cpu->icp);
+ spapr_cpu->icp = NULL;
+}
+
+static int xics_spapr_claim_irq(SpaprInterruptController *intc, int irq,
+ bool lsi, Error **errp)
+{
+ ICSState *ics = ICS_SPAPR(intc);
+
+ assert(ics);
+ assert(ics_valid_irq(ics, irq));
+
+ if (!ics_irq_free(ics, irq - ics->offset)) {
+ error_setg(errp, "IRQ %d is not free", irq);
+ return -EBUSY;
+ }
+
+ ics_set_irq_type(ics, irq - ics->offset, lsi);
+ return 0;
+}
+
+static void xics_spapr_free_irq(SpaprInterruptController *intc, int irq)
+{
+ ICSState *ics = ICS_SPAPR(intc);
+ uint32_t srcno = irq - ics->offset;
+
+ assert(ics_valid_irq(ics, irq));
+
+ memset(&ics->irqs[srcno], 0, sizeof(ICSIRQState));
+}
+
+static void xics_spapr_set_irq(SpaprInterruptController *intc, int irq, int val)
+{
+ ICSState *ics = ICS_SPAPR(intc);
+ uint32_t srcno = irq - ics->offset;
+
+ ics_set_irq(ics, srcno, val);
+}
+
+static void xics_spapr_print_info(SpaprInterruptController *intc, Monitor *mon)
+{
+ ICSState *ics = ICS_SPAPR(intc);
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
+ }
+
+ ics_pic_print_info(ics, mon);
+}
+
+static int xics_spapr_post_load(SpaprInterruptController *intc, int version_id)
+{
+ if (!kvm_irqchip_in_kernel()) {
+ CPUState *cs;
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ icp_resend(spapr_cpu_state(cpu)->icp);
+ }
+ }
+ return 0;
+}
+
+static int xics_spapr_activate(SpaprInterruptController *intc,
+ uint32_t nr_servers, Error **errp)
+{
+ if (kvm_enabled()) {
+ return spapr_irq_init_kvm(xics_kvm_connect, intc, nr_servers, errp);
+ }
+ return 0;
+}
+
+static void xics_spapr_deactivate(SpaprInterruptController *intc)
+{
+ if (kvm_irqchip_in_kernel()) {
+ xics_kvm_disconnect(intc);
+ }
+}
+
+static void ics_spapr_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ICSStateClass *isc = ICS_CLASS(klass);
+ SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
+
+ device_class_set_parent_realize(dc, ics_spapr_realize,
+ &isc->parent_realize);
+ sicc->activate = xics_spapr_activate;
+ sicc->deactivate = xics_spapr_deactivate;
+ sicc->cpu_intc_create = xics_spapr_cpu_intc_create;
+ sicc->cpu_intc_reset = xics_spapr_cpu_intc_reset;
+ sicc->cpu_intc_destroy = xics_spapr_cpu_intc_destroy;
+ sicc->claim_irq = xics_spapr_claim_irq;
+ sicc->free_irq = xics_spapr_free_irq;
+ sicc->set_irq = xics_spapr_set_irq;
+ sicc->print_info = xics_spapr_print_info;
+ sicc->dt = xics_spapr_dt;
+ sicc->post_load = xics_spapr_post_load;
+}
+
+static const TypeInfo ics_spapr_info = {
+ .name = TYPE_ICS_SPAPR,
+ .parent = TYPE_ICS,
+ .class_init = ics_spapr_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_SPAPR_INTC },
+ { }
+ },
+};
+
+static void xics_spapr_register_types(void)
+{
+ type_register_static(&ics_spapr_info);
+}
+
+type_init(xics_spapr_register_types)
diff --git a/hw/intc/xilinx_intc.c b/hw/intc/xilinx_intc.c
new file mode 100644
index 000000000..4c4397b3d
--- /dev/null
+++ b/hw/intc/xilinx_intc.c
@@ -0,0 +1,206 @@
+/*
+ * QEMU Xilinx OPB Interrupt Controller.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "qemu/module.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "qom/object.h"
+
+#define D(x)
+
+#define R_ISR 0
+#define R_IPR 1
+#define R_IER 2
+#define R_IAR 3
+#define R_SIE 4
+#define R_CIE 5
+#define R_IVR 6
+#define R_MER 7
+#define R_MAX 8
+
+#define TYPE_XILINX_INTC "xlnx.xps-intc"
+DECLARE_INSTANCE_CHECKER(struct xlx_pic, XILINX_INTC,
+ TYPE_XILINX_INTC)
+
+struct xlx_pic
+{
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+ qemu_irq parent_irq;
+
+ /* Configuration reg chosen at synthesis-time. QEMU populates
+ the bits at board-setup. */
+ uint32_t c_kind_of_intr;
+
+ /* Runtime control registers. */
+ uint32_t regs[R_MAX];
+ /* state of the interrupt input pins */
+ uint32_t irq_pin_state;
+};
+
+static void update_irq(struct xlx_pic *p)
+{
+ uint32_t i;
+
+ /* level triggered interrupt */
+ if (p->regs[R_MER] & 2) {
+ p->regs[R_ISR] |= p->irq_pin_state & ~p->c_kind_of_intr;
+ }
+
+ /* Update the pending register. */
+ p->regs[R_IPR] = p->regs[R_ISR] & p->regs[R_IER];
+
+ /* Update the vector register. */
+ for (i = 0; i < 32; i++) {
+ if (p->regs[R_IPR] & (1U << i)) {
+ break;
+ }
+ }
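+    /* No pending interrupt: the vector register reads back as all ones */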
+ if (i == 32)
+ i = ~0;
+
+ p->regs[R_IVR] = i;
+ qemu_set_irq(p->parent_irq, (p->regs[R_MER] & 1) && p->regs[R_IPR]);
+}
+
+static uint64_t
+pic_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ struct xlx_pic *p = opaque;
+ uint32_t r = 0;
+
+ addr >>= 2;
+ switch (addr)
+ {
+ default:
+ if (addr < ARRAY_SIZE(p->regs))
+ r = p->regs[addr];
+ break;
+
+ }
+ D(printf("%s %x=%x\n", __func__, addr * 4, r));
+ return r;
+}
+
+static void
+pic_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ struct xlx_pic *p = opaque;
+ uint32_t value = val64;
+
+ addr >>= 2;
+ D(qemu_log("%s addr=%x val=%x\n", __func__, addr * 4, value));
+ switch (addr)
+ {
+ case R_IAR:
+ p->regs[R_ISR] &= ~value; /* ACK. */
+ break;
+ case R_SIE:
+ p->regs[R_IER] |= value; /* Atomic set ie. */
+ break;
+ case R_CIE:
+ p->regs[R_IER] &= ~value; /* Atomic clear ie. */
+ break;
+ case R_MER:
+ p->regs[R_MER] = value & 0x3;
+ break;
+ case R_ISR:
+ if ((p->regs[R_MER] & 2)) {
+ break;
+ }
+ /* fallthrough */
+ default:
+ if (addr < ARRAY_SIZE(p->regs))
+ p->regs[addr] = value;
+ break;
+ }
+ update_irq(p);
+}
+
+static const MemoryRegionOps pic_ops = {
+ .read = pic_read,
+ .write = pic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4
+ }
+};
+
+static void irq_handler(void *opaque, int irq, int level)
+{
+ struct xlx_pic *p = opaque;
+
+ /* edge triggered interrupt */
+ if (p->c_kind_of_intr & (1 << irq) && p->regs[R_MER] & 2) {
+ p->regs[R_ISR] |= (level << irq);
+ }
+
+ p->irq_pin_state &= ~(1 << irq);
+ p->irq_pin_state |= level << irq;
+ update_irq(p);
+}
+
+static void xilinx_intc_init(Object *obj)
+{
+ struct xlx_pic *p = XILINX_INTC(obj);
+
+ qdev_init_gpio_in(DEVICE(obj), irq_handler, 32);
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &p->parent_irq);
+
+ memory_region_init_io(&p->mmio, obj, &pic_ops, p, "xlnx.xps-intc",
+ R_MAX * 4);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio);
+}
+
+static Property xilinx_intc_properties[] = {
+ DEFINE_PROP_UINT32("kind-of-intr", struct xlx_pic, c_kind_of_intr, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xilinx_intc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, xilinx_intc_properties);
+}
+
+static const TypeInfo xilinx_intc_info = {
+ .name = TYPE_XILINX_INTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(struct xlx_pic),
+ .instance_init = xilinx_intc_init,
+ .class_init = xilinx_intc_class_init,
+};
+
+static void xilinx_intc_register_types(void)
+{
+ type_register_static(&xilinx_intc_info);
+}
+
+type_init(xilinx_intc_register_types)
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
new file mode 100644
index 000000000..190194d27
--- /dev/null
+++ b/hw/intc/xive.c
@@ -0,0 +1,1983 @@
+/*
+ * QEMU PowerPC XIVE interrupt controller model
+ *
+ * Copyright (c) 2017-2018, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/dma.h"
+#include "sysemu/reset.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "hw/irq.h"
+#include "hw/ppc/xive.h"
+#include "hw/ppc/xive_regs.h"
+#include "trace.h"
+
+/*
+ * XIVE Thread Interrupt Management context
+ */
+
+/*
+ * Convert an Interrupt Pending Buffer (IPB) register to a Pending
+ * Interrupt Priority Register (PIPR), which contains the priority of
+ * the most favored pending notification.
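+ *
+ * The most significant IPB bit corresponds to priority 0 (most favored);
+ * e.g. an IPB of 0x88 (priorities 0 and 4 pending) gives a PIPR of 0.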
+ */
+static uint8_t ipb_to_pipr(uint8_t ipb)
+{
+    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
+}
+
+static uint8_t exception_mask(uint8_t ring)
+{
+ switch (ring) {
+ case TM_QW1_OS:
+ return TM_QW1_NSR_EO;
+ case TM_QW3_HV_PHYS:
+ return TM_QW3_NSR_HE;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
+{
+ switch (ring) {
+ case TM_QW0_USER:
+ return 0; /* Not supported */
+ case TM_QW1_OS:
+ return tctx->os_output;
+ case TM_QW2_HV_POOL:
+ case TM_QW3_HV_PHYS:
+ return tctx->hv_output;
+ default:
+ return 0;
+ }
+}
+
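+/*
+ * ACK the pending exception of a ring: lower the output line, promote
+ * PIPR to CPPR, clear the corresponding IPB bit and the NSR exception
+ * bit, and return the (NSR << 8 | CPPR) acknowledge value.
+ */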
+static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
+{
+ uint8_t *regs = &tctx->regs[ring];
+ uint8_t nsr = regs[TM_NSR];
+ uint8_t mask = exception_mask(ring);
+
+ qemu_irq_lower(xive_tctx_output(tctx, ring));
+
+ if (regs[TM_NSR] & mask) {
+ uint8_t cppr = regs[TM_PIPR];
+
+ regs[TM_CPPR] = cppr;
+
+ /* Reset the pending buffer bit */
+ regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
+ regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
+
+ /* Drop Exception bit */
+ regs[TM_NSR] &= ~mask;
+
+ trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
+ regs[TM_IPB], regs[TM_PIPR],
+ regs[TM_CPPR], regs[TM_NSR]);
+ }
+
+ return (nsr << 8) | regs[TM_CPPR];
+}
+
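+/*
+ * Raise an exception on the ring output when the pending priority
+ * (PIPR) is more favored (lower) than the current CPPR.
+ */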
+static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
+{
+ uint8_t *regs = &tctx->regs[ring];
+
+ if (regs[TM_PIPR] < regs[TM_CPPR]) {
+ switch (ring) {
+ case TM_QW1_OS:
+ regs[TM_NSR] |= TM_QW1_NSR_EO;
+ break;
+ case TM_QW3_HV_PHYS:
+ regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
+ regs[TM_IPB], regs[TM_PIPR],
+ regs[TM_CPPR], regs[TM_NSR]);
+ qemu_irq_raise(xive_tctx_output(tctx, ring));
+ }
+}
+
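+/*
+ * Update the CPPR of a ring. Out-of-range values are replaced by 0xFF
+ * and the change may raise a new pending exception.
+ */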
+static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
+{
+ uint8_t *regs = &tctx->regs[ring];
+
+ trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
+ regs[TM_IPB], regs[TM_PIPR],
+ cppr, regs[TM_NSR]);
+
+ if (cppr > XIVE_PRIORITY_MAX) {
+ cppr = 0xff;
+ }
+
+ tctx->regs[ring + TM_CPPR] = cppr;
+
+ /* CPPR has changed, check if we need to raise a pending exception */
+ xive_tctx_notify(tctx, ring);
+}
+
+void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
+{
+ uint8_t *regs = &tctx->regs[ring];
+
+ regs[TM_IPB] |= ipb;
+ regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
+ xive_tctx_notify(tctx, ring);
+}
+
+/*
+ * XIVE Thread Interrupt Management Area (TIMA)
+ */
+
+static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
+}
+
+static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size)
+{
+ return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
+}
+
+static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size)
+{
+ uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
+ uint32_t qw2w2;
+
+ qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
+ memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
+ return qw2w2;
+}
+
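+/* Store/load the byte of the HV PHYS ring WORD2 holding the VT bit */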
+static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
+}
+
+static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size)
+{
+ return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
+}
+
+/*
+ * Define an access map for each page of the TIMA that we will use in
+ * the memory region ops to filter values when doing loads and stores
+ * of raw register values.
+ *
+ * Register accessibility bits:
+ *
+ * 0x0 - no access
+ * 0x1 - write only
+ * 0x2 - read only
+ * 0x3 - read/write
+ */
+
+static const uint8_t xive_tm_hw_view[] = {
+ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
+ 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
+ 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
+ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
+};
+
+static const uint8_t xive_tm_hv_view[] = {
+ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
+ 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
+ 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
+ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
+};
+
+static const uint8_t xive_tm_os_view[] = {
+ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
+ 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
+};
+
+static const uint8_t xive_tm_user_view[] = {
+ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
+};
+
+/*
+ * Overall TIMA access map for the thread interrupt management context
+ * registers
+ */
+static const uint8_t *xive_tm_views[] = {
+ [XIVE_TM_HW_PAGE] = xive_tm_hw_view,
+ [XIVE_TM_HV_PAGE] = xive_tm_hv_view,
+ [XIVE_TM_OS_PAGE] = xive_tm_os_view,
+ [XIVE_TM_USER_PAGE] = xive_tm_user_view,
+};
+
+/*
+ * Computes a register access mask for a given offset in the TIMA
+ */
+static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
+{
+ uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
+ uint8_t reg_offset = offset & 0x3F;
+ uint8_t reg_mask = write ? 0x1 : 0x2;
+ uint64_t mask = 0x0;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
+ mask |= (uint64_t) 0xff << (8 * (size - i - 1));
+ }
+ }
+
+ return mask;
+}
+
+static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ uint8_t ring_offset = offset & 0x30;
+ uint8_t reg_offset = offset & 0x3F;
+ uint64_t mask = xive_tm_mask(offset, size, true);
+ int i;
+
+ /*
+     * Only 4- or 8-byte stores are allowed and the User ring is
+     * excluded
+ */
+ if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
+ HWADDR_PRIx"\n", offset);
+ return;
+ }
+
+ /*
+ * Use the register offset for the raw values and filter out
+ * reserved values
+ */
+ for (i = 0; i < size; i++) {
+ uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
+ if (byte_mask) {
+ tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
+ byte_mask;
+ }
+ }
+}
+
+static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
+{
+ uint8_t ring_offset = offset & 0x30;
+ uint8_t reg_offset = offset & 0x3F;
+ uint64_t mask = xive_tm_mask(offset, size, false);
+ uint64_t ret;
+ int i;
+
+ /*
+     * Only 4- or 8-byte loads are allowed and the User ring is
+     * excluded
+ */
+ if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
+ HWADDR_PRIx"\n", offset);
+ return -1;
+ }
+
+ /* Use the register offset for the raw values */
+ ret = 0;
+ for (i = 0; i < size; i++) {
+ ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
+ }
+
+ /* filter out reserved values */
+ return ret & mask;
+}
+
+/*
+ * The TM context is mapped twice within each page. Stores and loads
+ * to the first mapping below 2K write and read the specified values
+ * without modification. The second mapping above 2K performs specific
+ * state changes (side effects) in addition to setting/returning the
+ * interrupt management area context of the processor thread.
+ */
+static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size)
+{
+ return xive_tctx_accept(tctx, TM_QW1_OS);
+}
+
+static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
+}
+
+/*
+ * Adjust the IPB to allow a CPU to process event queues of other
+ * priorities during one physical interrupt cycle.
+ */
+static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff));
+}
+
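+/* Decode an OS CAM line (TM_QW1W2) into NVT block/index and the VO bit */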
+static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
+ uint32_t *nvt_idx, bool *vo)
+{
+ if (nvt_blk) {
+ *nvt_blk = xive_nvt_blk(cam);
+ }
+ if (nvt_idx) {
+ *nvt_idx = xive_nvt_idx(cam);
+ }
+ if (vo) {
+ *vo = !!(cam & TM_QW1W2_VO);
+ }
+}
+
+static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
+ uint32_t *nvt_idx, bool *vo)
+{
+ uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
+ uint32_t cam = be32_to_cpu(qw1w2);
+
+ xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo);
+ return qw1w2;
+}
+
+static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2)
+{
+ memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
+}
+
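+/*
+ * Pull (invalidate) the OS CAM line and return its previous value. A
+ * guest error is logged if the context was not valid (VO bit clear).
+ */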
+static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size)
+{
+ uint32_t qw1w2;
+ uint32_t qw1w2_new;
+ uint8_t nvt_blk;
+ uint32_t nvt_idx;
+ bool vo;
+
+ qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);
+
+ if (!vo) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
+ nvt_blk, nvt_idx);
+ }
+
+ /* Invalidate CAM line */
+ qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
+ xive_tctx_set_os_cam(tctx, qw1w2_new);
+ return qw1w2;
+}
+
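+/*
+ * When a valid OS CAM line is pushed, check the NVT for interrupts
+ * that were left pending while the vCPU was not dispatched and merge
+ * them in the IPB of the OS ring.
+ */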
+static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
+ uint8_t nvt_blk, uint32_t nvt_idx)
+{
+ XiveNVT nvt;
+ uint8_t ipb;
+
+ /*
+ * Grab the associated NVT to pull the pending bits, and merge
+ * them with the IPB of the thread interrupt context registers
+ */
+ if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n",
+ nvt_blk, nvt_idx);
+ return;
+ }
+
+ ipb = xive_get_field32(NVT_W4_IPB, nvt.w4);
+
+ if (ipb) {
+ /* Reset the NVT value */
+ nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
+ xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
+
+ /* Merge in current context */
+ xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+ }
+}
+
+/*
+ * Updating the OS CAM line can trigger a resend of interrupt
+ */
+static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ uint32_t cam = value;
+ uint32_t qw1w2 = cpu_to_be32(cam);
+ uint8_t nvt_blk;
+ uint32_t nvt_idx;
+ bool vo;
+
+ xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo);
+
+ /* First update the registers */
+ xive_tctx_set_os_cam(tctx, qw1w2);
+
+ /* Check the interrupt pending bits */
+ if (vo) {
+ xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
+ }
+}
+
+/*
+ * Define a mapping of "special" operations depending on the TIMA page
+ * offset and the size of the operation.
+ */
+typedef struct XiveTmOp {
+ uint8_t page_offset;
+ uint32_t op_offset;
+ unsigned size;
+ void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset,
+ uint64_t value, unsigned size);
+ uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+ unsigned size);
+} XiveTmOp;
+
+static const XiveTmOp xive_tm_operations[] = {
+ /*
+ * MMIOs below 2K : raw values and special operations without side
+ * effects
+ */
+ { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
+ { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
+ { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
+
+ /* MMIOs above 2K : special operations with side effects */
+ { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
+ { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
+ { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
+};
+
+static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
+{
+ uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
+ uint32_t op_offset = offset & 0xFFF;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
+ const XiveTmOp *xto = &xive_tm_operations[i];
+
+        /* Accesses done from a more privileged TIMA page are allowed */
+ if (xto->page_offset >= page_offset &&
+ xto->op_offset == op_offset &&
+ xto->size == size &&
+ ((write && xto->write_handler) || (!write && xto->read_handler))) {
+ return xto;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * TIMA MMIO handlers
+ */
+void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ const XiveTmOp *xto;
+
+ trace_xive_tctx_tm_write(offset, size, value);
+
+ /*
+ * TODO: check V bit in Q[0-3]W2
+ */
+
+ /*
+ * First, check for special operations in the 2K region
+ */
+ if (offset & 0x800) {
+ xto = xive_tm_find_op(offset, size, true);
+ if (!xto) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
+ "@%"HWADDR_PRIx"\n", offset);
+ } else {
+ xto->write_handler(xptr, tctx, offset, value, size);
+ }
+ return;
+ }
+
+ /*
+ * Then, for special operations in the region below 2K.
+ */
+ xto = xive_tm_find_op(offset, size, true);
+ if (xto) {
+ xto->write_handler(xptr, tctx, offset, value, size);
+ return;
+ }
+
+ /*
+ * Finish with raw access to the register values
+ */
+ xive_tm_raw_write(tctx, offset, value, size);
+}
+
+uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+ unsigned size)
+{
+ const XiveTmOp *xto;
+ uint64_t ret;
+
+ /*
+ * TODO: check V bit in Q[0-3]W2
+ */
+
+ /*
+ * First, check for special operations in the 2K region
+ */
+ if (offset & 0x800) {
+ xto = xive_tm_find_op(offset, size, false);
+ if (!xto) {
+            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA "
+ "@%"HWADDR_PRIx"\n", offset);
+ return -1;
+ }
+ ret = xto->read_handler(xptr, tctx, offset, size);
+ goto out;
+ }
+
+ /*
+ * Then, for special operations in the region below 2K.
+ */
+ xto = xive_tm_find_op(offset, size, false);
+ if (xto) {
+ ret = xto->read_handler(xptr, tctx, offset, size);
+ goto out;
+ }
+
+ /*
+ * Finish with raw access to the register values
+ */
+ ret = xive_tm_raw_read(tctx, offset, size);
+out:
+ trace_xive_tctx_tm_read(offset, size, ret);
+ return ret;
+}
+
+static char *xive_tctx_ring_print(uint8_t *ring)
+{
+ uint32_t w2 = xive_tctx_word2(ring);
+
+ return g_strdup_printf("%02x %02x %02x %02x %02x "
+ "%02x %02x %02x %08x",
+ ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB],
+ ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR],
+ be32_to_cpu(w2));
+}
+
+static const char * const xive_tctx_ring_names[] = {
+ "USER", "OS", "POOL", "PHYS",
+};
+
+/*
+ * kvm_irqchip_in_kernel() will cause the compiler to turn this
+ * into a nop if CONFIG_KVM isn't defined.
+ */
+#define xive_in_kernel(xptr) \
+ (kvm_irqchip_in_kernel() && \
+ ({ \
+ XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); \
+ xpc->in_kernel ? xpc->in_kernel(xptr) : false; \
+ }))
+
+void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
+{
+ int cpu_index;
+ int i;
+
+    /*
+     * Skip partially initialized vCPUs. This can happen on sPAPR when
+     * vCPUs are hot plugged or unplugged.
+     */
+ if (!tctx) {
+ return;
+ }
+
+ cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
+
+ if (xive_in_kernel(tctx->xptr)) {
+ Error *local_err = NULL;
+
+ kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ return;
+ }
+ }
+
+ monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
+ " W2\n", cpu_index);
+
+ for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
+ char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
+ monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
+ xive_tctx_ring_names[i], s);
+ g_free(s);
+ }
+}
+
+void xive_tctx_reset(XiveTCTX *tctx)
+{
+ memset(tctx->regs, 0, sizeof(tctx->regs));
+
+ /* Set some defaults */
+ tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
+ tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
+ tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
+
+ /*
+ * Initialize PIPR to 0xFF to avoid phantom interrupts when the
+ * CPPR is first set.
+ */
+ tctx->regs[TM_QW1_OS + TM_PIPR] =
+ ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
+ tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
+ ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
+}
+
+static void xive_tctx_realize(DeviceState *dev, Error **errp)
+{
+ XiveTCTX *tctx = XIVE_TCTX(dev);
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+
+ assert(tctx->cs);
+ assert(tctx->xptr);
+
+ cpu = POWERPC_CPU(tctx->cs);
+ env = &cpu->env;
+ switch (PPC_INPUT(env)) {
+ case PPC_FLAGS_INPUT_POWER9:
+ tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
+ tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
+ break;
+
+ default:
+ error_setg(errp, "XIVE interrupt controller does not support "
+ "this CPU bus model");
+ return;
+ }
+
+ /* Connect the presenter to the VCPU (required for CPU hotplug) */
+ if (xive_in_kernel(tctx->xptr)) {
+ if (kvmppc_xive_cpu_connect(tctx, errp) < 0) {
+ return;
+ }
+ }
+}
+
+static int vmstate_xive_tctx_pre_save(void *opaque)
+{
+ XiveTCTX *tctx = XIVE_TCTX(opaque);
+ Error *local_err = NULL;
+ int ret;
+
+ if (xive_in_kernel(tctx->xptr)) {
+ ret = kvmppc_xive_cpu_get_state(tctx, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
+{
+ XiveTCTX *tctx = XIVE_TCTX(opaque);
+ Error *local_err = NULL;
+ int ret;
+
+ if (xive_in_kernel(tctx->xptr)) {
+ /*
+ * Required for hotplugged CPU, for which the state comes
+ * after all states of the machine.
+ */
+ ret = kvmppc_xive_cpu_set_state(tctx, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_xive_tctx = {
+ .name = TYPE_XIVE_TCTX,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = vmstate_xive_tctx_pre_save,
+ .post_load = vmstate_xive_tctx_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_BUFFER(regs, XiveTCTX),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static Property xive_tctx_properties[] = {
+ DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
+ DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER,
+ XivePresenter *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xive_tctx_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "XIVE Interrupt Thread Context";
+ dc->realize = xive_tctx_realize;
+ dc->vmsd = &vmstate_xive_tctx;
+ device_class_set_props(dc, xive_tctx_properties);
+ /*
+ * Reason: part of XIVE interrupt controller, needs to be wired up
+ * by xive_tctx_create().
+ */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo xive_tctx_info = {
+ .name = TYPE_XIVE_TCTX,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(XiveTCTX),
+ .class_init = xive_tctx_class_init,
+};
+
+Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp)
+{
+ Object *obj;
+
+ obj = object_new(TYPE_XIVE_TCTX);
+ object_property_add_child(cpu, TYPE_XIVE_TCTX, obj);
+ object_unref(obj);
+ object_property_set_link(obj, "cpu", cpu, &error_abort);
+ object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort);
+ if (!qdev_realize(DEVICE(obj), NULL, errp)) {
+ object_unparent(obj);
+ return NULL;
+ }
+ return obj;
+}
+
+void xive_tctx_destroy(XiveTCTX *tctx)
+{
+ Object *obj = OBJECT(tctx);
+
+ object_unparent(obj);
+}
+
+/*
+ * XIVE ESB helpers
+ */
+
+uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
+{
+ uint8_t old_pq = *pq & 0x3;
+
+ *pq &= ~0x3;
+ *pq |= value & 0x3;
+
+ return old_pq;
+}
+
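+/*
+ * PQ state transitions on a trigger event. Returns true when the
+ * event should be forwarded for routing:
+ *
+ *   00 (reset)   -> 10 (pending)  forward
+ *   10 (pending) -> 11 (queued)   coalesce
+ *   11 (queued)  -> 11 (queued)   coalesce
+ *   01 (off)     -> 01 (off)      drop
+ */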
+bool xive_esb_trigger(uint8_t *pq)
+{
+ uint8_t old_pq = *pq & 0x3;
+
+ switch (old_pq) {
+ case XIVE_ESB_RESET:
+ xive_esb_set(pq, XIVE_ESB_PENDING);
+ return true;
+ case XIVE_ESB_PENDING:
+ case XIVE_ESB_QUEUED:
+ xive_esb_set(pq, XIVE_ESB_QUEUED);
+ return false;
+ case XIVE_ESB_OFF:
+ xive_esb_set(pq, XIVE_ESB_OFF);
+ return false;
+ default:
+ g_assert_not_reached();
+ }
+}
+
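+/*
+ * PQ state transitions on an EOI. Returns true when a new event was
+ * queued while the previous one was being handled:
+ *
+ *   00 (reset)   -> 00 (reset)    no event
+ *   10 (pending) -> 00 (reset)    no event
+ *   11 (queued)  -> 10 (pending)  forward
+ *   01 (off)     -> 01 (off)      drop
+ */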
+bool xive_esb_eoi(uint8_t *pq)
+{
+ uint8_t old_pq = *pq & 0x3;
+
+ switch (old_pq) {
+ case XIVE_ESB_RESET:
+ case XIVE_ESB_PENDING:
+ xive_esb_set(pq, XIVE_ESB_RESET);
+ return false;
+ case XIVE_ESB_QUEUED:
+ xive_esb_set(pq, XIVE_ESB_PENDING);
+ return true;
+ case XIVE_ESB_OFF:
+ xive_esb_set(pq, XIVE_ESB_OFF);
+ return false;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/*
+ * XIVE Interrupt Source (or IVSE)
+ */
+
+uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
+{
+ assert(srcno < xsrc->nr_irqs);
+
+ return xsrc->status[srcno] & 0x3;
+}
+
+uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
+{
+ assert(srcno < xsrc->nr_irqs);
+
+ return xive_esb_set(&xsrc->status[srcno], pq);
+}
+
+/*
+ * Returns whether the event notification should be forwarded.
+ */
+static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
+{
+ uint8_t old_pq = xive_source_esb_get(xsrc, srcno);
+
+ xive_source_set_asserted(xsrc, srcno, true);
+
+ switch (old_pq) {
+ case XIVE_ESB_RESET:
+ xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Returns whether the event notification should be forwarded.
+ */
+static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
+{
+ bool ret;
+
+ assert(srcno < xsrc->nr_irqs);
+
+ ret = xive_esb_trigger(&xsrc->status[srcno]);
+
+ if (xive_source_irq_is_lsi(xsrc, srcno) &&
+ xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: queued an event on LSI IRQ %d\n", srcno);
+ }
+
+ return ret;
+}
+
+/*
+ * Returns whether the event notification should be forwarded.
+ */
+static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
+{
+ bool ret;
+
+ assert(srcno < xsrc->nr_irqs);
+
+ ret = xive_esb_eoi(&xsrc->status[srcno]);
+
+ /*
+ * LSI sources do not set the Q bit but they can still be
+ * asserted, in which case we should forward a new event
+ * notification
+ */
+ if (xive_source_irq_is_lsi(xsrc, srcno) &&
+ xive_source_is_asserted(xsrc, srcno)) {
+ ret = xive_source_lsi_trigger(xsrc, srcno);
+ }
+
+ return ret;
+}
+
+/*
+ * Forward the source event notification to the Router
+ */
+static void xive_source_notify(XiveSource *xsrc, int srcno)
+{
+ XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
+
+ if (xnc->notify) {
+ xnc->notify(xsrc->xive, srcno);
+ }
+}
+
+/*
+ * In a two-page ESB MMIO setting, the even page is the trigger page and
+ * the odd page is for management
+ */
+static inline bool addr_is_even(hwaddr addr, uint32_t shift)
+{
+ return !((addr >> shift) & 1);
+}
+
+static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
+{
+ return xive_source_esb_has_2page(xsrc) &&
+ addr_is_even(addr, xsrc->esb_shift - 1);
+}
+
+/*
+ * ESB MMIO loads
+ *                      Trigger page    Management/EOI page
+ *
+ * ESB MMIO setting     2 pages         1 or 2 pages
+ *
+ * 0x000 .. 0x3FF       -1              EOI and return 0|1
+ * 0x400 .. 0x7FF       -1              EOI and return 0|1
+ * 0x800 .. 0xBFF       -1              return PQ
+ * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
+ * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
+ * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
+ * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
+ */
+static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
+{
+ XiveSource *xsrc = XIVE_SOURCE(opaque);
+ uint32_t offset = addr & 0xFFF;
+ uint32_t srcno = addr >> xsrc->esb_shift;
+ uint64_t ret = -1;
+
+    /* In a two-page ESB MMIO setting, the trigger page should not be read */
+ if (xive_source_is_trigger_page(xsrc, addr)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: invalid load on IRQ %d trigger page at "
+ "0x%"HWADDR_PRIx"\n", srcno, addr);
+ return -1;
+ }
+
+ switch (offset) {
+ case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
+ ret = xive_source_esb_eoi(xsrc, srcno);
+
+ /* Forward the source event notification for routing */
+ if (ret) {
+ xive_source_notify(xsrc, srcno);
+ }
+ break;
+
+ case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
+ ret = xive_source_esb_get(xsrc, srcno);
+ break;
+
+ case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
+ case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
+ case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
+ case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
+ ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
+ offset);
+ }
+
+ trace_xive_source_esb_read(addr, srcno, ret);
+
+ return ret;
+}
+
+/*
+ * ESB MMIO stores
+ *                      Trigger page    Management/EOI page
+ *
+ * ESB MMIO setting     2 pages         1 or 2 pages
+ *
+ * 0x000 .. 0x3FF       Trigger         Trigger
+ * 0x400 .. 0x7FF       Trigger         EOI
+ * 0x800 .. 0xBFF       Trigger         undefined
+ * 0xC00 .. 0xCFF       Trigger         PQ=00
+ * 0xD00 .. 0xDFF       Trigger         PQ=01
+ * 0xE00 .. 0xEFF       Trigger         PQ=10
+ * 0xF00 .. 0xFFF       Trigger         PQ=11
+ */
+static void xive_source_esb_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ XiveSource *xsrc = XIVE_SOURCE(opaque);
+ uint32_t offset = addr & 0xFFF;
+ uint32_t srcno = addr >> xsrc->esb_shift;
+ bool notify = false;
+
+ trace_xive_source_esb_write(addr, srcno, value);
+
+    /* In a two-page ESB MMIO setting, the trigger page only triggers */
+ if (xive_source_is_trigger_page(xsrc, addr)) {
+ notify = xive_source_esb_trigger(xsrc, srcno);
+ goto out;
+ }
+
+ switch (offset) {
+ case 0 ... 0x3FF:
+ notify = xive_source_esb_trigger(xsrc, srcno);
+ break;
+
+ case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
+ if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: invalid Store EOI for IRQ %d\n", srcno);
+ return;
+ }
+
+ notify = xive_source_esb_eoi(xsrc, srcno);
+ break;
+
+ case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
+ case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
+ case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
+ case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
+ xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
+ offset);
+ return;
+ }
+
+out:
+ /* Forward the source event notification for routing */
+ if (notify) {
+ xive_source_notify(xsrc, srcno);
+ }
+}
+
+static const MemoryRegionOps xive_source_esb_ops = {
+ .read = xive_source_esb_read,
+ .write = xive_source_esb_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
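+/*
+ * qemu_irq handler of the source input lines. LSIs follow the line
+ * level while MSIs only trigger on a non-zero value.
+ */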
+void xive_source_set_irq(void *opaque, int srcno, int val)
+{
+ XiveSource *xsrc = XIVE_SOURCE(opaque);
+ bool notify = false;
+
+ if (xive_source_irq_is_lsi(xsrc, srcno)) {
+ if (val) {
+ notify = xive_source_lsi_trigger(xsrc, srcno);
+ } else {
+ xive_source_set_asserted(xsrc, srcno, false);
+ }
+ } else {
+ if (val) {
+ notify = xive_source_esb_trigger(xsrc, srcno);
+ }
+ }
+
+ /* Forward the source event notification for routing */
+ if (notify) {
+ xive_source_notify(xsrc, srcno);
+ }
+}
+
+void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
+{
+ int i;
+
+ for (i = 0; i < xsrc->nr_irqs; i++) {
+ uint8_t pq = xive_source_esb_get(xsrc, i);
+
+ if (pq == XIVE_ESB_OFF) {
+ continue;
+ }
+
+ monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
+ xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
+ pq & XIVE_ESB_VAL_P ? 'P' : '-',
+ pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ xive_source_is_asserted(xsrc, i) ? 'A' : ' ');
+ }
+}
+
+static void xive_source_reset(void *dev)
+{
+ XiveSource *xsrc = XIVE_SOURCE(dev);
+
+ /* Do not clear the LSI bitmap */
+
+ /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
+ memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
+}
+
+static void xive_source_realize(DeviceState *dev, Error **errp)
+{
+ XiveSource *xsrc = XIVE_SOURCE(dev);
+ size_t esb_len = xive_source_esb_len(xsrc);
+
+ assert(xsrc->xive);
+
+ if (!xsrc->nr_irqs) {
+        error_setg(errp, "Number of interrupts needs to be greater than 0");
+ return;
+ }
+
+ if (xsrc->esb_shift != XIVE_ESB_4K &&
+ xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
+ xsrc->esb_shift != XIVE_ESB_64K &&
+ xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
+ error_setg(errp, "Invalid ESB shift setting");
+ return;
+ }
+
+ xsrc->status = g_malloc0(xsrc->nr_irqs);
+ xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);
+
+ memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len);
+ memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc),
+ &xive_source_esb_ops, xsrc, "xive.esb-emulated",
+ esb_len);
+ memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated);
+
+ qemu_register_reset(xive_source_reset, dev);
+}
+
+static const VMStateDescription vmstate_xive_source = {
+ .name = TYPE_XIVE_SOURCE,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
+ VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+/*
+ * The default XIVE interrupt source setting for the ESB MMIOs is two
+ * 64k pages without Store EOI, to be in sync with KVM.
+ */
+static Property xive_source_properties[] = {
+ DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
+ DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
+ DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
+ DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
+ XiveNotifier *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xive_source_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "XIVE Interrupt Source";
+ device_class_set_props(dc, xive_source_properties);
+ dc->realize = xive_source_realize;
+ dc->vmsd = &vmstate_xive_source;
+ /*
+ * Reason: part of XIVE interrupt controller, needs to be wired up,
+ * e.g. by spapr_xive_instance_init().
+ */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo xive_source_info = {
+ .name = TYPE_XIVE_SOURCE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(XiveSource),
+ .class_init = xive_source_class_init,
+};
+
+/*
+ * XiveEND helpers
+ */
+
+void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
+{
+ uint64_t qaddr_base = xive_end_qaddr(end);
+ uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
+ uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+ uint32_t qentries = 1 << (qsize + 10);
+ int i;
+
+ /*
+     * print out the [ (qindex - (width - 1)) .. qindex ] window
+ */
+ monitor_printf(mon, " [ ");
+ qindex = (qindex - (width - 1)) & (qentries - 1);
+ for (i = 0; i < width; i++) {
+ uint64_t qaddr = qaddr_base + (qindex << 2);
+ uint32_t qdata = -1;
+
+ if (dma_memory_read(&address_space_memory, qaddr, &qdata,
+ sizeof(qdata))) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
+ HWADDR_PRIx "\n", qaddr);
+ return;
+ }
+ monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
+ be32_to_cpu(qdata));
+ qindex = (qindex + 1) & (qentries - 1);
+ }
+ monitor_printf(mon, "]");
+}
+
+void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
+{
+ uint64_t qaddr_base = xive_end_qaddr(end);
+ uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+ uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
+ uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
+ uint32_t qentries = 1 << (qsize + 10);
+
+ uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
+ uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
+ uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
+ uint8_t pq;
+
+ if (!xive_end_is_valid(end)) {
+ return;
+ }
+
+ pq = xive_get_field32(END_W1_ESn, end->w1);
+
+ monitor_printf(mon, " %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
+ end_idx,
+ pq & XIVE_ESB_VAL_P ? 'P' : '-',
+ pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ xive_end_is_valid(end) ? 'v' : '-',
+ xive_end_is_enqueue(end) ? 'q' : '-',
+ xive_end_is_notify(end) ? 'n' : '-',
+ xive_end_is_backlog(end) ? 'b' : '-',
+ xive_end_is_escalate(end) ? 'e' : '-',
+ xive_end_is_uncond_escalation(end) ? 'u' : '-',
+ xive_end_is_silent_escalation(end) ? 's' : '-',
+ xive_end_is_firmware(end) ? 'f' : '-',
+ priority, nvt_blk, nvt_idx);
+
+ if (qaddr_base) {
+ monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
+ qaddr_base, qindex, qentries, qgen);
+ xive_end_queue_pic_print_info(end, 6, mon);
+ }
+ monitor_printf(mon, "\n");
+}
+
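+/*
+ * Push a 32-bit event data word in the END queue. The generation bit
+ * is stored in the top bit of each entry and toggles each time the
+ * queue index wraps around.
+ */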
+static void xive_end_enqueue(XiveEND *end, uint32_t data)
+{
+ uint64_t qaddr_base = xive_end_qaddr(end);
+ uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
+ uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+ uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
+
+ uint64_t qaddr = qaddr_base + (qindex << 2);
+ uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
+ uint32_t qentries = 1 << (qsize + 10);
+
+ if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
+ HWADDR_PRIx "\n", qaddr);
+ return;
+ }
+
+ qindex = (qindex + 1) & (qentries - 1);
+ if (qindex == 0) {
+ qgen ^= 1;
+ end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
+ }
+ end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
+}
+
+void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
+ Monitor *mon)
+{
+ XiveEAS *eas = (XiveEAS *) &end->w4;
+ uint8_t pq;
+
+ if (!xive_end_is_escalate(end)) {
+ return;
+ }
+
+ pq = xive_get_field32(END_W1_ESe, end->w1);
+
+ monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
+ end_idx,
+ pq & XIVE_ESB_VAL_P ? 'P' : '-',
+ pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ xive_eas_is_valid(eas) ? 'V' : ' ',
+ xive_eas_is_masked(eas) ? 'M' : ' ',
+ (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
+ (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
+ (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
+}
+
+/*
+ * XIVE Router (aka. Virtualization Controller or IVRE)
+ */
+
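+/*
+ * Thin wrappers dispatching EAS/END/NVT table accesses to the
+ * machine-specific XiveRouter class implementation.
+ */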
+int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ XiveEAS *eas)
+{
+ XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
+}
+
+int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
+ XiveEND *end)
+{
+ XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_end(xrtr, end_blk, end_idx, end);
+}
+
+int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
+ XiveEND *end, uint8_t word_number)
+{
+ XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
+}
+
+int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
+ XiveNVT *nvt)
+{
+ XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
+}
+
+int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
+ XiveNVT *nvt, uint8_t word_number)
+{
+ XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
+}
+
+static int xive_router_get_block_id(XiveRouter *xrtr)
+{
+ XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_block_id(xrtr);
+}
+
+static void xive_router_realize(DeviceState *dev, Error **errp)
+{
+ XiveRouter *xrtr = XIVE_ROUTER(dev);
+
+ assert(xrtr->xfb);
+}
+
+/*
+ * Encode the HW CAM line in the block group mode format :
+ *
+ * chip << 19 | 0000000 0 0001 thread (7Bit)
+ */
+static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
+{
+ CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
+ uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+ uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr));
+
+ return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
+}
+
+/*
+ * The thread context register words are in big-endian format.
+ */
+int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
+ uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint32_t logic_serv)
+{
+ uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
+ uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
+ uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
+ uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
+ uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);
+
+ /*
+ * TODO (PowerNV): ignore mode. The low order bits of the NVT
+ * identifier are ignored in the "CAM" match.
+ */
+
+ if (format == 0) {
+ if (cam_ignore == true) {
+ /*
+ * F=0 & i=1: Logical server notification (bits ignored at
+ * the end of the NVT identifier)
+ */
+ qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
+ nvt_blk, nvt_idx);
+ return -1;
+ }
+
+ /* F=0 & i=0: Specific NVT notification */
+
+ /* PHYS ring */
+ if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
+ cam == xive_tctx_hw_cam_line(xptr, tctx)) {
+ return TM_QW3_HV_PHYS;
+ }
+
+ /* HV POOL ring */
+ if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
+ cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
+ return TM_QW2_HV_POOL;
+ }
+
+ /* OS ring */
+ if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
+ cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
+ return TM_QW1_OS;
+ }
+ } else {
+ /* F=1 : User level Event-Based Branch (EBB) notification */
+
+ /* USER ring */
+ if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
+ (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
+ (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
+ (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
+ return TM_QW0_USER;
+ }
+ }
+ return -1;
+}
+
+/*
+ * This is our simple Xive Presenter Engine model. It is merged into the
+ * Router as it does not require an extra object.
+ *
+ * It receives notification requests sent by the IVRE to find one
+ * matching NVT (or more) dispatched on the processor threads. In case
+ * of a single NVT notification, the process is abbreviated and the
+ * thread is signaled if a match is found. In case of a logical server
+ * notification (bits ignored at the end of the NVT identifier), the
+ * IVPE and IVRE select a winning thread using different filters. This
+ * involves 2 or 3 exchanges on the PowerBus that the model does not
+ * support.
+ *
+ * The parameters represent what is sent on the PowerBus
+ */
+bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv)
+{
+ XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
+ XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
+ int count;
+
+ /*
+ * Ask the machine to scan the interrupt controllers for a match
+ */
+ count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
+ priority, logic_serv, &match);
+ if (count < 0) {
+ return false;
+ }
+
+ /* handle CPU exception delivery */
+ if (count) {
+ trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
+ xive_tctx_ipb_update(match.tctx, match.ring,
+ xive_priority_to_ipb(priority));
+ }
+
+ return !!count;
+}
+
+/*
+ * Notification using the END ESe/ESn bit (Event State Buffer for
+ * escalation and notification). Provide further coalescing in the
+ * Router.
+ */
+static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
+ uint32_t end_idx, XiveEND *end,
+ uint32_t end_esmask)
+{
+ uint8_t pq = xive_get_field32(end_esmask, end->w1);
+ bool notify = xive_esb_trigger(&pq);
+
+ if (pq != xive_get_field32(end_esmask, end->w1)) {
+ end->w1 = xive_set_field32(end_esmask, end->w1, pq);
+ xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
+ }
+
+ /* ESe/n[Q]=1 : end of notification */
+ return notify;
+}
+
+/*
+ * An END trigger can come from an event trigger (IPI or HW) or from
+ * another chip. We don't model the PowerBus but the END trigger
+ * message has the same parameters as in the function below.
+ */
+static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
+ uint32_t end_idx, uint32_t end_data)
+{
+ XiveEND end;
+ uint8_t priority;
+ uint8_t format;
+ uint8_t nvt_blk;
+ uint32_t nvt_idx;
+ XiveNVT nvt;
+ bool found;
+
+ /* END cache lookup */
+ if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
+ end_idx);
+ return;
+ }
+
+ if (!xive_end_is_valid(&end)) {
+ trace_xive_router_end_notify(end_blk, end_idx, end_data);
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
+ end_blk, end_idx);
+ return;
+ }
+
+ if (xive_end_is_enqueue(&end)) {
+ xive_end_enqueue(&end, end_data);
+ /* Enqueuing event data modifies the EQ toggle and index */
+ xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
+ }
+
+ /*
+ * When the END is silent, we skip the notification part.
+ */
+ if (xive_end_is_silent_escalation(&end)) {
+ goto do_escalation;
+ }
+
+ /*
+ * The W7 format depends on the F bit in W6. It defines the type
+ * of the notification :
+ *
+ * F=0 : single or multiple NVT notification
+ * F=1 : User level Event-Based Branch (EBB) notification, no
+ * priority
+ */
+ format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
+ priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);
+
+ /* The END is masked */
+ if (format == 0 && priority == 0xff) {
+ return;
+ }
+
+ /*
+ * Check the END ESn (Event State Buffer for notification) for
+ * even further coalescing in the Router
+ */
+ if (!xive_end_is_notify(&end)) {
+ /* ESn[Q]=1 : end of notification */
+ if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
+ &end, END_W1_ESn)) {
+ return;
+ }
+ }
+
+ /*
+ * Follows IVPE notification
+ */
+ nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
+ nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);
+
+ /* NVT cache lookup */
+ if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
+ nvt_blk, nvt_idx);
+ return;
+ }
+
+ if (!xive_nvt_is_valid(&nvt)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
+ nvt_blk, nvt_idx);
+ return;
+ }
+
+ found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
+ xive_get_field32(END_W7_F0_IGNORE, end.w7),
+ priority,
+ xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
+
+ /* TODO: Auto EOI. */
+
+ if (found) {
+ return;
+ }
+
+ /*
+ * If no matching NVT is dispatched on a HW thread :
+ * - specific VP: update the NVT structure if backlog is activated
+ * - logical server : forward request to IVPE (not supported)
+ */
+ if (xive_end_is_backlog(&end)) {
+ uint8_t ipb;
+
+ if (format == 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: END %x/%x invalid config: F1 & backlog\n",
+ end_blk, end_idx);
+ return;
+ }
+ /*
+ * Record the IPB in the associated NVT structure for later
+ * use. The presenter will resend the interrupt when the vCPU
+ * is dispatched again on a HW thread.
+ */
+ ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) |
+ xive_priority_to_ipb(priority);
+ nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
+ xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
+
+ /*
+ * On HW, follows a "Broadcast Backlog" to IVPEs
+ */
+ }
+
+do_escalation:
+ /*
+ * If activated, escalate notification using the ESe PQ bits and
+ * the EAS in w4-5
+ */
+ if (!xive_end_is_escalate(&end)) {
+ return;
+ }
+
+ /*
+ * Check the END ESe (Event State Buffer for escalation) for even
+ * further coalescing in the Router
+ */
+ if (!xive_end_is_uncond_escalation(&end)) {
+ /* ESe[Q]=1 : end of notification */
+ if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
+ &end, END_W1_ESe)) {
+ return;
+ }
+ }
+
+ trace_xive_router_end_escalate(end_blk, end_idx,
+ (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
+ (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
+ (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5));
+ /*
+ * The END trigger becomes an Escalation trigger
+ */
+ xive_router_end_notify(xrtr,
+ xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
+ xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
+ xive_get_field32(END_W5_ESC_END_DATA, end.w5));
+}
+
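+/*
+ * Entry point of the source event notifications: the LISN is resolved
+ * through the EAS table and, unless the entry is masked or invalid,
+ * the event becomes an END trigger.
+ */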
+void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
+{
+ XiveRouter *xrtr = XIVE_ROUTER(xn);
+ uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
+ uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
+ XiveEAS eas;
+
+ /* EAS cache lookup */
+ if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
+ return;
+ }
+
+ /*
+ * The IVRE checks the State Bit Cache at this point. We skip the
+ * SBC lookup because the state bits of the sources are modeled
+ * internally in QEMU.
+ */
+
+ if (!xive_eas_is_valid(&eas)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
+ return;
+ }
+
+ if (xive_eas_is_masked(&eas)) {
+ /* Notification completed */
+ return;
+ }
+
+ /*
+ * The event trigger becomes an END trigger
+ */
+ xive_router_end_notify(xrtr,
+ xive_get_field64(EAS_END_BLOCK, eas.w),
+ xive_get_field64(EAS_END_INDEX, eas.w),
+ xive_get_field64(EAS_END_DATA, eas.w));
+}
+
+static Property xive_router_properties[] = {
+ DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
+ TYPE_XIVE_FABRIC, XiveFabric *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xive_router_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
+
+ dc->desc = "XIVE Router Engine";
+ device_class_set_props(dc, xive_router_properties);
+ /* Parent is SysBusDeviceClass. No need to call its realize hook */
+ dc->realize = xive_router_realize;
+ xnc->notify = xive_router_notify;
+}
+
+static const TypeInfo xive_router_info = {
+ .name = TYPE_XIVE_ROUTER,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .abstract = true,
+ .instance_size = sizeof(XiveRouter),
+ .class_size = sizeof(XiveRouterClass),
+ .class_init = xive_router_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_XIVE_NOTIFIER },
+ { TYPE_XIVE_PRESENTER },
+ { }
+ }
+};
+
+void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
+{
+ if (!xive_eas_is_valid(eas)) {
+ return;
+ }
+
+ monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
+ lisn, xive_eas_is_masked(eas) ? "M" : " ",
+ (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
+ (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
+ (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
+}
+
+/*
+ * END ESB MMIO loads
+ */
+static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
+{
+ XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
+ uint32_t offset = addr & 0xFFF;
+ uint8_t end_blk;
+ uint32_t end_idx;
+ XiveEND end;
+ uint32_t end_esmask;
+ uint8_t pq;
+ uint64_t ret = -1;
+
+ /*
+ * The block id should be deduced from the load address on the END
+ * ESB MMIO but our model only supports a single block per XIVE chip.
+ */
+ end_blk = xive_router_get_block_id(xsrc->xrtr);
+ end_idx = addr >> (xsrc->esb_shift + 1);
+
+ trace_xive_end_source_read(end_blk, end_idx, addr);
+
+ if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
+ end_idx);
+ return -1;
+ }
+
+ if (!xive_end_is_valid(&end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
+ end_blk, end_idx);
+ return -1;
+ }
+
+ end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
+ pq = xive_get_field32(end_esmask, end.w1);
+
+ switch (offset) {
+ case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
+ ret = xive_esb_eoi(&pq);
+
+ /* Forward the source event notification for routing ?? */
+ break;
+
+ case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
+ ret = pq;
+ break;
+
+ case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
+ case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
+ case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
+ case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
+ ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
+ offset);
+ return -1;
+ }
+
+ if (pq != xive_get_field32(end_esmask, end.w1)) {
+ end.w1 = xive_set_field32(end_esmask, end.w1, pq);
+ xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
+ }
+
+ return ret;
+}
+
+/*
+ * END ESB MMIO stores are invalid
+ */
+static void xive_end_source_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
+ HWADDR_PRIx"\n", addr);
+}
+
+static const MemoryRegionOps xive_end_source_ops = {
+ .read = xive_end_source_read,
+ .write = xive_end_source_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
+static void xive_end_source_realize(DeviceState *dev, Error **errp)
+{
+ XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
+
+ assert(xsrc->xrtr);
+
+ if (!xsrc->nr_ends) {
+        error_setg(errp, "Number of interrupts needs to be greater than 0");
+ return;
+ }
+
+ if (xsrc->esb_shift != XIVE_ESB_4K &&
+ xsrc->esb_shift != XIVE_ESB_64K) {
+ error_setg(errp, "Invalid ESB shift setting");
+ return;
+ }
+
+ /*
+ * Each END is assigned an even/odd pair of MMIO pages, the even page
+ * manages the ESn field while the odd page manages the ESe field.
+ */
+ memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
+ &xive_end_source_ops, xsrc, "xive.end",
+ (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
+}
+
+static Property xive_end_source_properties[] = {
+ DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
+ DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
+ DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
+ XiveRouter *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xive_end_source_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "XIVE END Source";
+ device_class_set_props(dc, xive_end_source_properties);
+ dc->realize = xive_end_source_realize;
+ /*
+ * Reason: part of XIVE interrupt controller, needs to be wired up,
+ * e.g. by spapr_xive_instance_init().
+ */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo xive_end_source_info = {
+ .name = TYPE_XIVE_END_SOURCE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(XiveENDSource),
+ .class_init = xive_end_source_class_init,
+};
+
+/*
+ * XIVE Notifier
+ */
+static const TypeInfo xive_notifier_info = {
+ .name = TYPE_XIVE_NOTIFIER,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(XiveNotifierClass),
+};
+
+/*
+ * XIVE Presenter
+ */
+static const TypeInfo xive_presenter_info = {
+ .name = TYPE_XIVE_PRESENTER,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(XivePresenterClass),
+};
+
+/*
+ * XIVE Fabric
+ */
+static const TypeInfo xive_fabric_info = {
+ .name = TYPE_XIVE_FABRIC,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(XiveFabricClass),
+};
+
+static void xive_register_types(void)
+{
+ type_register_static(&xive_fabric_info);
+ type_register_static(&xive_source_info);
+ type_register_static(&xive_notifier_info);
+ type_register_static(&xive_presenter_info);
+ type_register_static(&xive_router_info);
+ type_register_static(&xive_end_source_info);
+ type_register_static(&xive_tctx_info);
+}
+
+type_init(xive_register_types)
diff --git a/hw/intc/xlnx-pmu-iomod-intc.c b/hw/intc/xlnx-pmu-iomod-intc.c
new file mode 100644
index 000000000..acaa1c3e6
--- /dev/null
+++ b/hw/intc/xlnx-pmu-iomod-intc.c
@@ -0,0 +1,558 @@
+/*
+ * QEMU model of Xilinx I/O Module Interrupt Controller
+ *
+ * Copyright (c) 2013 Xilinx Inc
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ * Written by Alistair Francis <alistair.francis@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/register.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/intc/xlnx-pmu-iomod-intc.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+
+#ifndef XLNX_PMU_IO_INTC_ERR_DEBUG
+#define XLNX_PMU_IO_INTC_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT_L(lvl, fmt, args...) do {\
+ if (XLNX_PMU_IO_INTC_ERR_DEBUG >= lvl) {\
+ qemu_log(TYPE_XLNX_PMU_IO_INTC ": %s:" fmt, __func__, ## args);\
+ } \
+} while (0)
+
+#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
+
+REG32(IRQ_MODE, 0xc)
+REG32(GPO0, 0x10)
+ FIELD(GPO0, MAGIC_WORD_1, 24, 8)
+ FIELD(GPO0, MAGIC_WORD_2, 16, 8)
+ FIELD(GPO0, FT_INJECT_FAILURE, 13, 3)
+ FIELD(GPO0, DISABLE_RST_FTSM, 12, 1)
+ FIELD(GPO0, RST_FTSM, 11, 1)
+ FIELD(GPO0, CLR_FTSTS, 10, 1)
+ FIELD(GPO0, RST_ON_SLEEP, 9, 1)
+ FIELD(GPO0, DISABLE_TRACE_COMP, 8, 1)
+ FIELD(GPO0, PIT3_PRESCALE, 7, 1)
+ FIELD(GPO0, PIT2_PRESCALE, 5, 2)
+ FIELD(GPO0, PIT1_PRESCALE, 3, 2)
+ FIELD(GPO0, PIT0_PRESCALE, 1, 2)
+ FIELD(GPO0, DEBUG_REMAP, 0, 1)
+REG32(GPO1, 0x14)
+ FIELD(GPO1, MIO_5, 5, 1)
+ FIELD(GPO1, MIO_4, 4, 1)
+ FIELD(GPO1, MIO_3, 3, 1)
+ FIELD(GPO1, MIO_2, 2, 1)
+ FIELD(GPO1, MIO_1, 1, 1)
+ FIELD(GPO1, MIO_0, 0, 1)
+REG32(GPO2, 0x18)
+ FIELD(GPO2, DAP_RPU_WAKE_ACK, 9, 1)
+ FIELD(GPO2, DAP_FP_WAKE_ACK, 8, 1)
+ FIELD(GPO2, PS_STATUS, 7, 1)
+ FIELD(GPO2, PCAP_EN, 6, 1)
+REG32(GPO3, 0x1c)
+ FIELD(GPO3, PL_GPO_31, 31, 1)
+ FIELD(GPO3, PL_GPO_30, 30, 1)
+ FIELD(GPO3, PL_GPO_29, 29, 1)
+ FIELD(GPO3, PL_GPO_28, 28, 1)
+ FIELD(GPO3, PL_GPO_27, 27, 1)
+ FIELD(GPO3, PL_GPO_26, 26, 1)
+ FIELD(GPO3, PL_GPO_25, 25, 1)
+ FIELD(GPO3, PL_GPO_24, 24, 1)
+ FIELD(GPO3, PL_GPO_23, 23, 1)
+ FIELD(GPO3, PL_GPO_22, 22, 1)
+ FIELD(GPO3, PL_GPO_21, 21, 1)
+ FIELD(GPO3, PL_GPO_20, 20, 1)
+ FIELD(GPO3, PL_GPO_19, 19, 1)
+ FIELD(GPO3, PL_GPO_18, 18, 1)
+ FIELD(GPO3, PL_GPO_17, 17, 1)
+ FIELD(GPO3, PL_GPO_16, 16, 1)
+ FIELD(GPO3, PL_GPO_15, 15, 1)
+ FIELD(GPO3, PL_GPO_14, 14, 1)
+ FIELD(GPO3, PL_GPO_13, 13, 1)
+ FIELD(GPO3, PL_GPO_12, 12, 1)
+ FIELD(GPO3, PL_GPO_11, 11, 1)
+ FIELD(GPO3, PL_GPO_10, 10, 1)
+ FIELD(GPO3, PL_GPO_9, 9, 1)
+ FIELD(GPO3, PL_GPO_8, 8, 1)
+ FIELD(GPO3, PL_GPO_7, 7, 1)
+ FIELD(GPO3, PL_GPO_6, 6, 1)
+ FIELD(GPO3, PL_GPO_5, 5, 1)
+ FIELD(GPO3, PL_GPO_4, 4, 1)
+ FIELD(GPO3, PL_GPO_3, 3, 1)
+ FIELD(GPO3, PL_GPO_2, 2, 1)
+ FIELD(GPO3, PL_GPO_1, 1, 1)
+ FIELD(GPO3, PL_GPO_0, 0, 1)
+REG32(GPI0, 0x20)
+ FIELD(GPI0, RFT_ECC_FATAL_ERR, 31, 1)
+ FIELD(GPI0, RFT_VOTER_ERR, 30, 1)
+ FIELD(GPI0, RFT_COMPARE_ERR_23, 29, 1)
+ FIELD(GPI0, RFT_COMPARE_ERR_13, 28, 1)
+ FIELD(GPI0, RFT_COMPARE_ERR_12, 27, 1)
+ FIELD(GPI0, RFT_LS_MISMATCH_23_B, 26, 1)
+ FIELD(GPI0, RFT_LS_MISMATCH_13_B, 25, 1)
+ FIELD(GPI0, RFT_LS_MISMATCH_12_B, 24, 1)
+ FIELD(GPI0, RFT_MISMATCH_STATE, 23, 1)
+ FIELD(GPI0, RFT_MISMATCH_CPU, 22, 1)
+ FIELD(GPI0, RFT_SLEEP_RESET, 19, 1)
+ FIELD(GPI0, RFT_LS_MISMATCH_23_A, 18, 1)
+ FIELD(GPI0, RFT_LS_MISMATCH_13_A, 17, 1)
+ FIELD(GPI0, RFT_LS_MISMATCH_12_A, 16, 1)
+ FIELD(GPI0, NFT_ECC_FATAL_ERR, 15, 1)
+ FIELD(GPI0, NFT_VOTER_ERR, 14, 1)
+ FIELD(GPI0, NFT_COMPARE_ERR_23, 13, 1)
+ FIELD(GPI0, NFT_COMPARE_ERR_13, 12, 1)
+ FIELD(GPI0, NFT_COMPARE_ERR_12, 11, 1)
+ FIELD(GPI0, NFT_LS_MISMATCH_23_B, 10, 1)
+ FIELD(GPI0, NFT_LS_MISMATCH_13_B, 9, 1)
+ FIELD(GPI0, NFT_LS_MISMATCH_12_B, 8, 1)
+ FIELD(GPI0, NFT_MISMATCH_STATE, 7, 1)
+ FIELD(GPI0, NFT_MISMATCH_CPU, 6, 1)
+ FIELD(GPI0, NFT_SLEEP_RESET, 3, 1)
+ FIELD(GPI0, NFT_LS_MISMATCH_23_A, 2, 1)
+ FIELD(GPI0, NFT_LS_MISMATCH_13_A, 1, 1)
+ FIELD(GPI0, NFT_LS_MISMATCH_12_A, 0, 1)
+REG32(GPI1, 0x24)
+ FIELD(GPI1, APB_AIB_ERROR, 31, 1)
+ FIELD(GPI1, AXI_AIB_ERROR, 30, 1)
+ FIELD(GPI1, ERROR_2, 29, 1)
+ FIELD(GPI1, ERROR_1, 28, 1)
+ FIELD(GPI1, ACPU_3_DBG_PWRUP, 23, 1)
+ FIELD(GPI1, ACPU_2_DBG_PWRUP, 22, 1)
+ FIELD(GPI1, ACPU_1_DBG_PWRUP, 21, 1)
+ FIELD(GPI1, ACPU_0_DBG_PWRUP, 20, 1)
+ FIELD(GPI1, FPD_WAKE_GIC_PROXY, 16, 1)
+ FIELD(GPI1, MIO_WAKE_5, 15, 1)
+ FIELD(GPI1, MIO_WAKE_4, 14, 1)
+ FIELD(GPI1, MIO_WAKE_3, 13, 1)
+ FIELD(GPI1, MIO_WAKE_2, 12, 1)
+ FIELD(GPI1, MIO_WAKE_1, 11, 1)
+ FIELD(GPI1, MIO_WAKE_0, 10, 1)
+ FIELD(GPI1, DAP_RPU_WAKE, 9, 1)
+ FIELD(GPI1, DAP_FPD_WAKE, 8, 1)
+ FIELD(GPI1, USB_1_WAKE, 7, 1)
+ FIELD(GPI1, USB_0_WAKE, 6, 1)
+ FIELD(GPI1, R5_1_WAKE, 5, 1)
+ FIELD(GPI1, R5_0_WAKE, 4, 1)
+ FIELD(GPI1, ACPU_3_WAKE, 3, 1)
+ FIELD(GPI1, ACPU_2_WAKE, 2, 1)
+ FIELD(GPI1, ACPU_1_WAKE, 1, 1)
+ FIELD(GPI1, ACPU_0_WAKE, 0, 1)
+REG32(GPI2, 0x28)
+ FIELD(GPI2, VCC_INT_FP_DISCONNECT, 31, 1)
+ FIELD(GPI2, VCC_INT_DISCONNECT, 30, 1)
+ FIELD(GPI2, VCC_AUX_DISCONNECT, 29, 1)
+ FIELD(GPI2, DBG_ACPU3_RST_REQ, 23, 1)
+ FIELD(GPI2, DBG_ACPU2_RST_REQ, 22, 1)
+ FIELD(GPI2, DBG_ACPU1_RST_REQ, 21, 1)
+ FIELD(GPI2, DBG_ACPU0_RST_REQ, 20, 1)
+ FIELD(GPI2, CP_ACPU3_RST_REQ, 19, 1)
+ FIELD(GPI2, CP_ACPU2_RST_REQ, 18, 1)
+ FIELD(GPI2, CP_ACPU1_RST_REQ, 17, 1)
+ FIELD(GPI2, CP_ACPU0_RST_REQ, 16, 1)
+ FIELD(GPI2, DBG_RCPU1_RST_REQ, 9, 1)
+ FIELD(GPI2, DBG_RCPU0_RST_REQ, 8, 1)
+ FIELD(GPI2, R5_1_SLEEP, 5, 1)
+ FIELD(GPI2, R5_0_SLEEP, 4, 1)
+ FIELD(GPI2, ACPU_3_SLEEP, 3, 1)
+ FIELD(GPI2, ACPU_2_SLEEP, 2, 1)
+ FIELD(GPI2, ACPU_1_SLEEP, 1, 1)
+ FIELD(GPI2, ACPU_0_SLEEP, 0, 1)
+REG32(GPI3, 0x2c)
+ FIELD(GPI3, PL_GPI_31, 31, 1)
+ FIELD(GPI3, PL_GPI_30, 30, 1)
+ FIELD(GPI3, PL_GPI_29, 29, 1)
+ FIELD(GPI3, PL_GPI_28, 28, 1)
+ FIELD(GPI3, PL_GPI_27, 27, 1)
+ FIELD(GPI3, PL_GPI_26, 26, 1)
+ FIELD(GPI3, PL_GPI_25, 25, 1)
+ FIELD(GPI3, PL_GPI_24, 24, 1)
+ FIELD(GPI3, PL_GPI_23, 23, 1)
+ FIELD(GPI3, PL_GPI_22, 22, 1)
+ FIELD(GPI3, PL_GPI_21, 21, 1)
+ FIELD(GPI3, PL_GPI_20, 20, 1)
+ FIELD(GPI3, PL_GPI_19, 19, 1)
+ FIELD(GPI3, PL_GPI_18, 18, 1)
+ FIELD(GPI3, PL_GPI_17, 17, 1)
+ FIELD(GPI3, PL_GPI_16, 16, 1)
+ FIELD(GPI3, PL_GPI_15, 15, 1)
+ FIELD(GPI3, PL_GPI_14, 14, 1)
+ FIELD(GPI3, PL_GPI_13, 13, 1)
+ FIELD(GPI3, PL_GPI_12, 12, 1)
+ FIELD(GPI3, PL_GPI_11, 11, 1)
+ FIELD(GPI3, PL_GPI_10, 10, 1)
+ FIELD(GPI3, PL_GPI_9, 9, 1)
+ FIELD(GPI3, PL_GPI_8, 8, 1)
+ FIELD(GPI3, PL_GPI_7, 7, 1)
+ FIELD(GPI3, PL_GPI_6, 6, 1)
+ FIELD(GPI3, PL_GPI_5, 5, 1)
+ FIELD(GPI3, PL_GPI_4, 4, 1)
+ FIELD(GPI3, PL_GPI_3, 3, 1)
+ FIELD(GPI3, PL_GPI_2, 2, 1)
+ FIELD(GPI3, PL_GPI_1, 1, 1)
+ FIELD(GPI3, PL_GPI_0, 0, 1)
+REG32(IRQ_STATUS, 0x30)
+ FIELD(IRQ_STATUS, CSU_PMU_SEC_LOCK, 31, 1)
+ FIELD(IRQ_STATUS, INV_ADDR, 29, 1)
+ FIELD(IRQ_STATUS, PWR_DN_REQ, 28, 1)
+ FIELD(IRQ_STATUS, PWR_UP_REQ, 27, 1)
+ FIELD(IRQ_STATUS, SW_RST_REQ, 26, 1)
+ FIELD(IRQ_STATUS, HW_RST_REQ, 25, 1)
+ FIELD(IRQ_STATUS, ISO_REQ, 24, 1)
+ FIELD(IRQ_STATUS, FW_REQ, 23, 1)
+ FIELD(IRQ_STATUS, IPI3, 22, 1)
+ FIELD(IRQ_STATUS, IPI2, 21, 1)
+ FIELD(IRQ_STATUS, IPI1, 20, 1)
+ FIELD(IRQ_STATUS, IPI0, 19, 1)
+ FIELD(IRQ_STATUS, RTC_ALARM, 18, 1)
+ FIELD(IRQ_STATUS, RTC_EVERY_SECOND, 17, 1)
+ FIELD(IRQ_STATUS, CORRECTABLE_ECC, 16, 1)
+ FIELD(IRQ_STATUS, GPI3, 14, 1)
+ FIELD(IRQ_STATUS, GPI2, 13, 1)
+ FIELD(IRQ_STATUS, GPI1, 12, 1)
+ FIELD(IRQ_STATUS, GPI0, 11, 1)
+ FIELD(IRQ_STATUS, PIT3, 6, 1)
+ FIELD(IRQ_STATUS, PIT2, 5, 1)
+ FIELD(IRQ_STATUS, PIT1, 4, 1)
+ FIELD(IRQ_STATUS, PIT0, 3, 1)
+REG32(IRQ_PENDING, 0x34)
+ FIELD(IRQ_PENDING, CSU_PMU_SEC_LOCK, 31, 1)
+ FIELD(IRQ_PENDING, INV_ADDR, 29, 1)
+ FIELD(IRQ_PENDING, PWR_DN_REQ, 28, 1)
+ FIELD(IRQ_PENDING, PWR_UP_REQ, 27, 1)
+ FIELD(IRQ_PENDING, SW_RST_REQ, 26, 1)
+ FIELD(IRQ_PENDING, HW_RST_REQ, 25, 1)
+ FIELD(IRQ_PENDING, ISO_REQ, 24, 1)
+ FIELD(IRQ_PENDING, FW_REQ, 23, 1)
+ FIELD(IRQ_PENDING, IPI3, 22, 1)
+ FIELD(IRQ_PENDING, IPI2, 21, 1)
+ FIELD(IRQ_PENDING, IPI1, 20, 1)
+ FIELD(IRQ_PENDING, IPI0, 19, 1)
+ FIELD(IRQ_PENDING, RTC_ALARM, 18, 1)
+ FIELD(IRQ_PENDING, RTC_EVERY_SECOND, 17, 1)
+ FIELD(IRQ_PENDING, CORRECTABLE_ECC, 16, 1)
+ FIELD(IRQ_PENDING, GPI3, 14, 1)
+ FIELD(IRQ_PENDING, GPI2, 13, 1)
+ FIELD(IRQ_PENDING, GPI1, 12, 1)
+ FIELD(IRQ_PENDING, GPI0, 11, 1)
+ FIELD(IRQ_PENDING, PIT3, 6, 1)
+ FIELD(IRQ_PENDING, PIT2, 5, 1)
+ FIELD(IRQ_PENDING, PIT1, 4, 1)
+ FIELD(IRQ_PENDING, PIT0, 3, 1)
+REG32(IRQ_ENABLE, 0x38)
+ FIELD(IRQ_ENABLE, CSU_PMU_SEC_LOCK, 31, 1)
+ FIELD(IRQ_ENABLE, INV_ADDR, 29, 1)
+ FIELD(IRQ_ENABLE, PWR_DN_REQ, 28, 1)
+ FIELD(IRQ_ENABLE, PWR_UP_REQ, 27, 1)
+ FIELD(IRQ_ENABLE, SW_RST_REQ, 26, 1)
+ FIELD(IRQ_ENABLE, HW_RST_REQ, 25, 1)
+ FIELD(IRQ_ENABLE, ISO_REQ, 24, 1)
+ FIELD(IRQ_ENABLE, FW_REQ, 23, 1)
+ FIELD(IRQ_ENABLE, IPI3, 22, 1)
+ FIELD(IRQ_ENABLE, IPI2, 21, 1)
+ FIELD(IRQ_ENABLE, IPI1, 20, 1)
+ FIELD(IRQ_ENABLE, IPI0, 19, 1)
+ FIELD(IRQ_ENABLE, RTC_ALARM, 18, 1)
+ FIELD(IRQ_ENABLE, RTC_EVERY_SECOND, 17, 1)
+ FIELD(IRQ_ENABLE, CORRECTABLE_ECC, 16, 1)
+ FIELD(IRQ_ENABLE, GPI3, 14, 1)
+ FIELD(IRQ_ENABLE, GPI2, 13, 1)
+ FIELD(IRQ_ENABLE, GPI1, 12, 1)
+ FIELD(IRQ_ENABLE, GPI0, 11, 1)
+ FIELD(IRQ_ENABLE, PIT3, 6, 1)
+ FIELD(IRQ_ENABLE, PIT2, 5, 1)
+ FIELD(IRQ_ENABLE, PIT1, 4, 1)
+ FIELD(IRQ_ENABLE, PIT0, 3, 1)
+REG32(IRQ_ACK, 0x3c)
+ FIELD(IRQ_ACK, CSU_PMU_SEC_LOCK, 31, 1)
+ FIELD(IRQ_ACK, INV_ADDR, 29, 1)
+ FIELD(IRQ_ACK, PWR_DN_REQ, 28, 1)
+ FIELD(IRQ_ACK, PWR_UP_REQ, 27, 1)
+ FIELD(IRQ_ACK, SW_RST_REQ, 26, 1)
+ FIELD(IRQ_ACK, HW_RST_REQ, 25, 1)
+ FIELD(IRQ_ACK, ISO_REQ, 24, 1)
+ FIELD(IRQ_ACK, FW_REQ, 23, 1)
+ FIELD(IRQ_ACK, IPI3, 22, 1)
+ FIELD(IRQ_ACK, IPI2, 21, 1)
+ FIELD(IRQ_ACK, IPI1, 20, 1)
+ FIELD(IRQ_ACK, IPI0, 19, 1)
+ FIELD(IRQ_ACK, RTC_ALARM, 18, 1)
+ FIELD(IRQ_ACK, RTC_EVERY_SECOND, 17, 1)
+ FIELD(IRQ_ACK, CORRECTABLE_ECC, 16, 1)
+ FIELD(IRQ_ACK, GPI3, 14, 1)
+ FIELD(IRQ_ACK, GPI2, 13, 1)
+ FIELD(IRQ_ACK, GPI1, 12, 1)
+ FIELD(IRQ_ACK, GPI0, 11, 1)
+ FIELD(IRQ_ACK, PIT3, 6, 1)
+ FIELD(IRQ_ACK, PIT2, 5, 1)
+ FIELD(IRQ_ACK, PIT1, 4, 1)
+ FIELD(IRQ_ACK, PIT0, 3, 1)
+REG32(PIT0_PRELOAD, 0x40)
+REG32(PIT0_COUNTER, 0x44)
+REG32(PIT0_CONTROL, 0x48)
+ FIELD(PIT0_CONTROL, PRELOAD, 1, 1)
+ FIELD(PIT0_CONTROL, EN, 0, 1)
+REG32(PIT1_PRELOAD, 0x50)
+REG32(PIT1_COUNTER, 0x54)
+REG32(PIT1_CONTROL, 0x58)
+ FIELD(PIT1_CONTROL, PRELOAD, 1, 1)
+ FIELD(PIT1_CONTROL, EN, 0, 1)
+REG32(PIT2_PRELOAD, 0x60)
+REG32(PIT2_COUNTER, 0x64)
+REG32(PIT2_CONTROL, 0x68)
+ FIELD(PIT2_CONTROL, PRELOAD, 1, 1)
+ FIELD(PIT2_CONTROL, EN, 0, 1)
+REG32(PIT3_PRELOAD, 0x70)
+REG32(PIT3_COUNTER, 0x74)
+REG32(PIT3_CONTROL, 0x78)
+ FIELD(PIT3_CONTROL, PRELOAD, 1, 1)
+ FIELD(PIT3_CONTROL, EN, 0, 1)
+
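+/*
+ * Recompute IRQ_PENDING as the set of latched status bits that are
+ * currently enabled, and drive the parent IRQ line accordingly.
+ */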
+static void xlnx_pmu_io_irq_update(XlnxPMUIOIntc *s)
+{
+ bool irq_out;
+
+ s->regs[R_IRQ_PENDING] = s->regs[R_IRQ_STATUS] & s->regs[R_IRQ_ENABLE];
+ irq_out = !!s->regs[R_IRQ_PENDING];
+
+ DB_PRINT("Setting IRQ output = %d\n", irq_out);
+
+ qemu_set_irq(s->parent_irq, irq_out);
+}
+
+static void xlnx_pmu_io_irq_enable_postw(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(reg->opaque);
+
+ xlnx_pmu_io_irq_update(s);
+}
+
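+/*
+ * IRQ_ACK is write-one-to-clear: acknowledged bits are dropped from
+ * IRQ_STATUS, but level-sensitive sources that are still asserted are
+ * re-latched immediately below.
+ */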
+static void xlnx_pmu_io_irq_ack_postw(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(reg->opaque);
+ uint32_t val = val64;
+
+ /* Write-one-to-clear: only acknowledge bits that are currently set. */
+ val &= s->regs[R_IRQ_STATUS];
+ s->regs[R_IRQ_STATUS] ^= val;
+
+ /* Active level triggered interrupts stay high. */
+ s->regs[R_IRQ_STATUS] |= s->irq_raw & ~s->cfg.level_edge;
+
+ xlnx_pmu_io_irq_update(s);
+}
+
+static const RegisterAccessInfo xlnx_pmu_io_intc_regs_info[] = {
+ { .name = "IRQ_MODE", .addr = A_IRQ_MODE,
+ .rsvd = 0xffffffff,
+ },{ .name = "GPO0", .addr = A_GPO0,
+ },{ .name = "GPO1", .addr = A_GPO1,
+ .rsvd = 0xffffffc0,
+ },{ .name = "GPO2", .addr = A_GPO2,
+ .rsvd = 0xfffffc3f,
+ },{ .name = "GPO3", .addr = A_GPO3,
+ },{ .name = "GPI0", .addr = A_GPI0,
+ .rsvd = 0x300030,
+ .ro = 0xffcfffcf,
+ },{ .name = "GPI1", .addr = A_GPI1,
+ .rsvd = 0xf0e0000,
+ .ro = 0xf0f1ffff,
+ },{ .name = "GPI2", .addr = A_GPI2,
+ .rsvd = 0x1f00fcc0,
+ .ro = 0xe0ff033f,
+ },{ .name = "GPI3", .addr = A_GPI3,
+ .ro = 0xffffffff,
+ },{ .name = "IRQ_STATUS", .addr = A_IRQ_STATUS,
+ .rsvd = 0x40008787,
+ .ro = 0xbfff7878,
+ },{ .name = "IRQ_PENDING", .addr = A_IRQ_PENDING,
+ .rsvd = 0x40008787,
+ .ro = 0xdfff7ff8,
+ },{ .name = "IRQ_ENABLE", .addr = A_IRQ_ENABLE,
+ .rsvd = 0x40008787,
+ .ro = 0x7800,
+ .post_write = xlnx_pmu_io_irq_enable_postw,
+ },{ .name = "IRQ_ACK", .addr = A_IRQ_ACK,
+ .rsvd = 0x40008787,
+ .post_write = xlnx_pmu_io_irq_ack_postw,
+ },{ .name = "PIT0_PRELOAD", .addr = A_PIT0_PRELOAD,
+ .ro = 0xffffffff,
+ },{ .name = "PIT0_COUNTER", .addr = A_PIT0_COUNTER,
+ .ro = 0xffffffff,
+ },{ .name = "PIT0_CONTROL", .addr = A_PIT0_CONTROL,
+ .rsvd = 0xfffffffc,
+ },{ .name = "PIT1_PRELOAD", .addr = A_PIT1_PRELOAD,
+ .ro = 0xffffffff,
+ },{ .name = "PIT1_COUNTER", .addr = A_PIT1_COUNTER,
+ .ro = 0xffffffff,
+ },{ .name = "PIT1_CONTROL", .addr = A_PIT1_CONTROL,
+ .rsvd = 0xfffffffc,
+ },{ .name = "PIT2_PRELOAD", .addr = A_PIT2_PRELOAD,
+ .ro = 0xffffffff,
+ },{ .name = "PIT2_COUNTER", .addr = A_PIT2_COUNTER,
+ .ro = 0xffffffff,
+ },{ .name = "PIT2_CONTROL", .addr = A_PIT2_CONTROL,
+ .rsvd = 0xfffffffc,
+ },{ .name = "PIT3_PRELOAD", .addr = A_PIT3_PRELOAD,
+ .ro = 0xffffffff,
+ },{ .name = "PIT3_COUNTER", .addr = A_PIT3_COUNTER,
+ .ro = 0xffffffff,
+ },{ .name = "PIT3_CONTROL", .addr = A_PIT3_CONTROL,
+ .rsvd = 0xfffffffc,
+ }
+};
+
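+/*
+ * GPIO input handler: latch the raw level of the line, normalise its
+ * polarity via cfg.positive, then set the corresponding IRQ_STATUS bit
+ * on a rising edge for edge-triggered lines (cfg.level_edge bit set), or
+ * whenever the line is active for level-triggered ones.
+ */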
+static void irq_handler(void *opaque, int irq, int level)
+{
+ XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(opaque);
+ uint32_t mask = 1 << irq;
+ uint32_t prev = s->irq_raw;
+ uint32_t temp;
+
+ s->irq_raw &= ~mask;
+ s->irq_raw |= (!!level) << irq;
+
+ /* Turn active-low into active-high. */
+ s->irq_raw ^= (~s->cfg.positive);
+ s->irq_raw &= mask;
+
+ if (s->cfg.level_edge & mask) {
+ /* Edge triggered. */
+ temp = (prev ^ s->irq_raw) & s->irq_raw;
+ } else {
+ /* Level triggered. */
+ temp = s->irq_raw;
+ }
+ s->regs[R_IRQ_STATUS] |= temp;
+
+ xlnx_pmu_io_irq_update(s);
+}
+
+static void xlnx_pmu_io_intc_reset(DeviceState *dev)
+{
+ XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+
+ xlnx_pmu_io_irq_update(s);
+}
+
+static const MemoryRegionOps xlnx_pmu_io_intc_ops = {
+ .read = register_read_memory,
+ .write = register_write_memory,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static Property xlnx_pmu_io_intc_properties[] = {
+ DEFINE_PROP_UINT32("intc-intr-size", XlnxPMUIOIntc, cfg.intr_size, 0),
+ DEFINE_PROP_UINT32("intc-level-edge", XlnxPMUIOIntc, cfg.level_edge, 0),
+ DEFINE_PROP_UINT32("intc-positive", XlnxPMUIOIntc, cfg.positive, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
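+/*
+ * Illustrative wiring only (not part of this device model): a board would
+ * typically configure the external-interrupt properties before realizing
+ * the device, e.g.
+ *
+ *   DeviceState *dev = qdev_new(TYPE_XLNX_PMU_IO_INTC);
+ *   qdev_prop_set_uint32(dev, "intc-intr-size", 0x10);
+ *   qdev_prop_set_uint32(dev, "intc-level-edge", 0x0);
+ *   qdev_prop_set_uint32(dev, "intc-positive", 0xffff);
+ *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ *
+ * The property values above are placeholders; boards choose their own.
+ */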
+
+static void xlnx_pmu_io_intc_realize(DeviceState *dev, Error **errp)
+{
+ XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(dev);
+
+ /* Internal interrupts are edge triggered. */
+ s->cfg.level_edge <<= 16;
+ s->cfg.level_edge |= 0xffff;
+
+ /* Internal interrupts are positive. */
+ s->cfg.positive <<= 16;
+ s->cfg.positive |= 0xffff;
+
+ /* Max 16 external interrupts. */
+ assert(s->cfg.intr_size <= 16);
+
+ qdev_init_gpio_in(dev, irq_handler, 16 + s->cfg.intr_size);
+}
+
+static void xlnx_pmu_io_intc_init(Object *obj)
+{
+ XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ RegisterInfoArray *reg_array;
+
+ memory_region_init(&s->iomem, obj, TYPE_XLNX_PMU_IO_INTC,
+ XLNXPMUIOINTC_R_MAX * 4);
+ reg_array =
+ register_init_block32(DEVICE(obj), xlnx_pmu_io_intc_regs_info,
+ ARRAY_SIZE(xlnx_pmu_io_intc_regs_info),
+ s->regs_info, s->regs,
+ &xlnx_pmu_io_intc_ops,
+ XLNX_PMU_IO_INTC_ERR_DEBUG,
+ XLNXPMUIOINTC_R_MAX * 4);
+ memory_region_add_subregion(&s->iomem,
+ 0x0,
+ &reg_array->mem);
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ sysbus_init_irq(sbd, &s->parent_irq);
+}
+
+static const VMStateDescription vmstate_xlnx_pmu_io_intc = {
+ .name = TYPE_XLNX_PMU_IO_INTC,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, XlnxPMUIOIntc, XLNXPMUIOINTC_R_MAX),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static void xlnx_pmu_io_intc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = xlnx_pmu_io_intc_reset;
+ dc->realize = xlnx_pmu_io_intc_realize;
+ dc->vmsd = &vmstate_xlnx_pmu_io_intc;
+ device_class_set_props(dc, xlnx_pmu_io_intc_properties);
+}
+
+static const TypeInfo xlnx_pmu_io_intc_info = {
+ .name = TYPE_XLNX_PMU_IO_INTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxPMUIOIntc),
+ .class_init = xlnx_pmu_io_intc_class_init,
+ .instance_init = xlnx_pmu_io_intc_init,
+};
+
+static void xlnx_pmu_io_intc_register_types(void)
+{
+ type_register_static(&xlnx_pmu_io_intc_info);
+}
+
+type_init(xlnx_pmu_io_intc_register_types)
diff --git a/hw/intc/xlnx-zynqmp-ipi.c b/hw/intc/xlnx-zynqmp-ipi.c
new file mode 100644
index 000000000..adc117901
--- /dev/null
+++ b/hw/intc/xlnx-zynqmp-ipi.c
@@ -0,0 +1,380 @@
+/*
+ * QEMU model of the IPI Inter Processor Interrupt block
+ *
+ * Copyright (c) 2014 Xilinx Inc.
+ *
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ * Written by Alistair Francis <alistair.francis@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/register.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/intc/xlnx-zynqmp-ipi.h"
+#include "hw/irq.h"
+
+#ifndef XLNX_ZYNQMP_IPI_ERR_DEBUG
+#define XLNX_ZYNQMP_IPI_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT_L(lvl, fmt, args...) do {\
+ if (XLNX_ZYNQMP_IPI_ERR_DEBUG >= lvl) {\
+ qemu_log(TYPE_XLNX_ZYNQMP_IPI ": %s:" fmt, __func__, ## args);\
+ } \
+} while (0)
+
+#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
+
+REG32(IPI_TRIG, 0x0)
+ FIELD(IPI_TRIG, PL_3, 27, 1)
+ FIELD(IPI_TRIG, PL_2, 26, 1)
+ FIELD(IPI_TRIG, PL_1, 25, 1)
+ FIELD(IPI_TRIG, PL_0, 24, 1)
+ FIELD(IPI_TRIG, PMU_3, 19, 1)
+ FIELD(IPI_TRIG, PMU_2, 18, 1)
+ FIELD(IPI_TRIG, PMU_1, 17, 1)
+ FIELD(IPI_TRIG, PMU_0, 16, 1)
+ FIELD(IPI_TRIG, RPU_1, 9, 1)
+ FIELD(IPI_TRIG, RPU_0, 8, 1)
+ FIELD(IPI_TRIG, APU, 0, 1)
+REG32(IPI_OBS, 0x4)
+ FIELD(IPI_OBS, PL_3, 27, 1)
+ FIELD(IPI_OBS, PL_2, 26, 1)
+ FIELD(IPI_OBS, PL_1, 25, 1)
+ FIELD(IPI_OBS, PL_0, 24, 1)
+ FIELD(IPI_OBS, PMU_3, 19, 1)
+ FIELD(IPI_OBS, PMU_2, 18, 1)
+ FIELD(IPI_OBS, PMU_1, 17, 1)
+ FIELD(IPI_OBS, PMU_0, 16, 1)
+ FIELD(IPI_OBS, RPU_1, 9, 1)
+ FIELD(IPI_OBS, RPU_0, 8, 1)
+ FIELD(IPI_OBS, APU, 0, 1)
+REG32(IPI_ISR, 0x10)
+ FIELD(IPI_ISR, PL_3, 27, 1)
+ FIELD(IPI_ISR, PL_2, 26, 1)
+ FIELD(IPI_ISR, PL_1, 25, 1)
+ FIELD(IPI_ISR, PL_0, 24, 1)
+ FIELD(IPI_ISR, PMU_3, 19, 1)
+ FIELD(IPI_ISR, PMU_2, 18, 1)
+ FIELD(IPI_ISR, PMU_1, 17, 1)
+ FIELD(IPI_ISR, PMU_0, 16, 1)
+ FIELD(IPI_ISR, RPU_1, 9, 1)
+ FIELD(IPI_ISR, RPU_0, 8, 1)
+ FIELD(IPI_ISR, APU, 0, 1)
+REG32(IPI_IMR, 0x14)
+ FIELD(IPI_IMR, PL_3, 27, 1)
+ FIELD(IPI_IMR, PL_2, 26, 1)
+ FIELD(IPI_IMR, PL_1, 25, 1)
+ FIELD(IPI_IMR, PL_0, 24, 1)
+ FIELD(IPI_IMR, PMU_3, 19, 1)
+ FIELD(IPI_IMR, PMU_2, 18, 1)
+ FIELD(IPI_IMR, PMU_1, 17, 1)
+ FIELD(IPI_IMR, PMU_0, 16, 1)
+ FIELD(IPI_IMR, RPU_1, 9, 1)
+ FIELD(IPI_IMR, RPU_0, 8, 1)
+ FIELD(IPI_IMR, APU, 0, 1)
+REG32(IPI_IER, 0x18)
+ FIELD(IPI_IER, PL_3, 27, 1)
+ FIELD(IPI_IER, PL_2, 26, 1)
+ FIELD(IPI_IER, PL_1, 25, 1)
+ FIELD(IPI_IER, PL_0, 24, 1)
+ FIELD(IPI_IER, PMU_3, 19, 1)
+ FIELD(IPI_IER, PMU_2, 18, 1)
+ FIELD(IPI_IER, PMU_1, 17, 1)
+ FIELD(IPI_IER, PMU_0, 16, 1)
+ FIELD(IPI_IER, RPU_1, 9, 1)
+ FIELD(IPI_IER, RPU_0, 8, 1)
+ FIELD(IPI_IER, APU, 0, 1)
+REG32(IPI_IDR, 0x1c)
+ FIELD(IPI_IDR, PL_3, 27, 1)
+ FIELD(IPI_IDR, PL_2, 26, 1)
+ FIELD(IPI_IDR, PL_1, 25, 1)
+ FIELD(IPI_IDR, PL_0, 24, 1)
+ FIELD(IPI_IDR, PMU_3, 19, 1)
+ FIELD(IPI_IDR, PMU_2, 18, 1)
+ FIELD(IPI_IDR, PMU_1, 17, 1)
+ FIELD(IPI_IDR, PMU_0, 16, 1)
+ FIELD(IPI_IDR, RPU_1, 9, 1)
+ FIELD(IPI_IDR, RPU_0, 8, 1)
+ FIELD(IPI_IDR, APU, 0, 1)
+
+/*
+ * Bit positions of each IPI agent within the IPI registers, in the order
+ * used by index_array[] and index_array_names[] below:
+ * APU, RPU_0, RPU_1, PMU_0, PMU_1, PMU_2, PMU_3, PL_0, PL_1, PL_2, PL_3
+ */
+int index_array[NUM_IPIS] = {0, 8, 9, 16, 17, 18, 19, 24, 25, 26, 27};
+static const char *index_array_names[NUM_IPIS] = {"APU", "RPU_0", "RPU_1",
+ "PMU_0", "PMU_1", "PMU_2",
+ "PMU_3", "PL_0", "PL_1",
+ "PL_2", "PL_3"};
+
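+/*
+ * Drive each agent's trigger output with the state of that agent's bit
+ * in 'val', walking the bit positions listed in index_array[].
+ */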
+static void xlnx_zynqmp_ipi_set_trig(XlnxZynqMPIPI *s, uint32_t val)
+{
+ int i, ipi_index, ipi_mask;
+
+ for (i = 0; i < NUM_IPIS; i++) {
+ ipi_index = index_array[i];
+ ipi_mask = (1 << ipi_index);
+ DB_PRINT("Setting %s=%d\n", index_array_names[i],
+ !!(val & ipi_mask));
+ qemu_set_irq(s->irq_trig_out[i], !!(val & ipi_mask));
+ }
+}
+
+static void xlnx_zynqmp_ipi_set_obs(XlnxZynqMPIPI *s, uint32_t val)
+{
+ int i, ipi_index, ipi_mask;
+
+ for (i = 0; i < NUM_IPIS; i++) {
+ ipi_index = index_array[i];
+ ipi_mask = (1 << ipi_index);
+ DB_PRINT("Setting %s=%d\n", index_array_names[i],
+ !!(val & ipi_mask));
+ qemu_set_irq(s->irq_obs_out[i], !!(val & ipi_mask));
+ }
+}
+
+static void xlnx_zynqmp_ipi_update_irq(XlnxZynqMPIPI *s)
+{
+ bool pending = s->regs[R_IPI_ISR] & ~s->regs[R_IPI_IMR];
+
+ DB_PRINT("irq=%d isr=%x mask=%x\n",
+ pending, s->regs[R_IPI_ISR], s->regs[R_IPI_IMR]);
+ qemu_set_irq(s->irq, pending);
+}
+
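+/*
+ * A write to IPI_TRIG raises the selected agents' trigger outputs in the
+ * pre-write hook; the post-write hook below pulses them back low and
+ * leaves the register reading as zero.
+ */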
+static uint64_t xlnx_zynqmp_ipi_trig_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque);
+
+ xlnx_zynqmp_ipi_set_trig(s, val64);
+
+ return val64;
+}
+
+static void xlnx_zynqmp_ipi_trig_postw(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque);
+
+ /* TRIG generates a pulse on the outbound signals. We use the
+ * post-write callback to bring the signals back down.
+ */
+ s->regs[R_IPI_TRIG] = 0;
+
+ xlnx_zynqmp_ipi_set_trig(s, 0);
+}
+
+static uint64_t xlnx_zynqmp_ipi_isr_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque);
+
+ xlnx_zynqmp_ipi_set_obs(s, val64);
+
+ return val64;
+}
+
+static void xlnx_zynqmp_ipi_isr_postw(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque);
+
+ xlnx_zynqmp_ipi_update_irq(s);
+}
+
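+/*
+ * IPI_IER and IPI_IDR are write-only: writing a 1 enables (clears the IMR
+ * bit) or disables (sets the IMR bit) the matching interrupt, and the
+ * registers themselves read back as zero.
+ */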
+static uint64_t xlnx_zynqmp_ipi_ier_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque);
+ uint32_t val = val64;
+
+ s->regs[R_IPI_IMR] &= ~val;
+ xlnx_zynqmp_ipi_update_irq(s);
+ return 0;
+}
+
+static uint64_t xlnx_zynqmp_ipi_idr_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque);
+ uint32_t val = val64;
+
+ s->regs[R_IPI_IMR] |= val;
+ xlnx_zynqmp_ipi_update_irq(s);
+ return 0;
+}
+
+static const RegisterAccessInfo xlnx_zynqmp_ipi_regs_info[] = {
+ { .name = "IPI_TRIG", .addr = A_IPI_TRIG,
+ .rsvd = 0xf0f0fcfe,
+ .ro = 0xf0f0fcfe,
+ .pre_write = xlnx_zynqmp_ipi_trig_prew,
+ .post_write = xlnx_zynqmp_ipi_trig_postw,
+ },{ .name = "IPI_OBS", .addr = A_IPI_OBS,
+ .rsvd = 0xf0f0fcfe,
+ .ro = 0xffffffff,
+ },{ .name = "IPI_ISR", .addr = A_IPI_ISR,
+ .rsvd = 0xf0f0fcfe,
+ .ro = 0xf0f0fcfe,
+ .w1c = 0xf0f0301,
+ .pre_write = xlnx_zynqmp_ipi_isr_prew,
+ .post_write = xlnx_zynqmp_ipi_isr_postw,
+ },{ .name = "IPI_IMR", .addr = A_IPI_IMR,
+ .reset = 0xf0f0301,
+ .rsvd = 0xf0f0fcfe,
+ .ro = 0xffffffff,
+ },{ .name = "IPI_IER", .addr = A_IPI_IER,
+ .rsvd = 0xf0f0fcfe,
+ .ro = 0xf0f0fcfe,
+ .pre_write = xlnx_zynqmp_ipi_ier_prew,
+ },{ .name = "IPI_IDR", .addr = A_IPI_IDR,
+ .rsvd = 0xf0f0fcfe,
+ .ro = 0xf0f0fcfe,
+ .pre_write = xlnx_zynqmp_ipi_idr_prew,
+ }
+};
+
+static void xlnx_zynqmp_ipi_reset(DeviceState *dev)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+
+ xlnx_zynqmp_ipi_update_irq(s);
+}
+
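+/*
+ * Inbound "IPI_INPUTS" lines: latch the request into IPI_ISR, reflect the
+ * new state on the observation outputs, and update the interrupt output.
+ */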
+static void xlnx_zynqmp_ipi_handler(void *opaque, int n, int level)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(opaque);
+ uint32_t val = (!!level) << n;
+
+ DB_PRINT("IPI input irq[%d]=%d\n", n, level);
+
+ s->regs[R_IPI_ISR] |= val;
+ xlnx_zynqmp_ipi_set_obs(s, s->regs[R_IPI_ISR]);
+ xlnx_zynqmp_ipi_update_irq(s);
+}
+
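+/*
+ * Inbound "OBS_INPUTS" lines mirror the level of each line into the
+ * read-only IPI_OBS register.
+ */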
+static void xlnx_zynqmp_obs_handler(void *opaque, int n, int level)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(opaque);
+
+ DB_PRINT("OBS input irq[%d]=%d\n", n, level);
+
+ s->regs[R_IPI_OBS] &= ~(1ULL << n);
+ s->regs[R_IPI_OBS] |= (level << n);
+}
+
+static const MemoryRegionOps xlnx_zynqmp_ipi_ops = {
+ .read = register_read_memory,
+ .write = register_write_memory,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
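+/*
+ * The block exposes two 32-line named GPIO input interfaces: "IPI_INPUTS"
+ * for incoming triggers and "OBS_INPUTS" for observation feedback. A board
+ * model would typically connect these to the per-agent trigger and OBS
+ * outputs of the peer IPI blocks.
+ */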
+static void xlnx_zynqmp_ipi_realize(DeviceState *dev, Error **errp)
+{
+ qdev_init_gpio_in_named(dev, xlnx_zynqmp_ipi_handler, "IPI_INPUTS", 32);
+ qdev_init_gpio_in_named(dev, xlnx_zynqmp_obs_handler, "OBS_INPUTS", 32);
+}
+
+static void xlnx_zynqmp_ipi_init(Object *obj)
+{
+ XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(obj);
+ DeviceState *dev = DEVICE(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ RegisterInfoArray *reg_array;
+ char *irq_name;
+ int i;
+
+ memory_region_init(&s->iomem, obj, TYPE_XLNX_ZYNQMP_IPI,
+ R_XLNX_ZYNQMP_IPI_MAX * 4);
+ reg_array =
+ register_init_block32(DEVICE(obj), xlnx_zynqmp_ipi_regs_info,
+ ARRAY_SIZE(xlnx_zynqmp_ipi_regs_info),
+ s->regs_info, s->regs,
+ &xlnx_zynqmp_ipi_ops,
+ XLNX_ZYNQMP_IPI_ERR_DEBUG,
+ R_XLNX_ZYNQMP_IPI_MAX * 4);
+ memory_region_add_subregion(&s->iomem,
+ 0x0,
+ &reg_array->mem);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq);
+
+ for (i = 0; i < NUM_IPIS; i++) {
+ qdev_init_gpio_out_named(dev, &s->irq_trig_out[i],
+ index_array_names[i], 1);
+
+ irq_name = g_strdup_printf("OBS_%s", index_array_names[i]);
+ qdev_init_gpio_out_named(dev, &s->irq_obs_out[i],
+ irq_name, 1);
+ g_free(irq_name);
+ }
+}
+
+static const VMStateDescription vmstate_zynqmp_pmu_ipi = {
+ .name = TYPE_XLNX_ZYNQMP_IPI,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPIPI, R_XLNX_ZYNQMP_IPI_MAX),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static void xlnx_zynqmp_ipi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = xlnx_zynqmp_ipi_reset;
+ dc->realize = xlnx_zynqmp_ipi_realize;
+ dc->vmsd = &vmstate_zynqmp_pmu_ipi;
+}
+
+static const TypeInfo xlnx_zynqmp_ipi_info = {
+ .name = TYPE_XLNX_ZYNQMP_IPI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxZynqMPIPI),
+ .class_init = xlnx_zynqmp_ipi_class_init,
+ .instance_init = xlnx_zynqmp_ipi_init,
+};
+
+static void xlnx_zynqmp_ipi_register_types(void)
+{
+ type_register_static(&xlnx_zynqmp_ipi_info);
+}
+
+type_init(xlnx_zynqmp_ipi_register_types)